1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/delay.h>
9#include <linux/dmaengine.h>
10#include <linux/dma-mapping.h>
11#include <linux/dmapool.h>
12#include <linux/err.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/list.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_device.h>
22#include <linux/of_irq.h>
23#include <linux/workqueue.h>
24#include <linux/completion.h>
25#include <linux/soc/ti/k3-ringacc.h>
26#include <linux/soc/ti/ti_sci_protocol.h>
27#include <linux/soc/ti/ti_sci_inta_msi.h>
28#include <linux/dma/ti-cppi5.h>
29
30#include "../virt-dma.h"
31#include "k3-udma.h"
32#include "k3-psil-priv.h"
33
34struct udma_static_tr {
35 u8 elsize; /* RPSTR0 */
36 u16 elcnt; /* RPSTR0 */
37 u16 bstcnt; /* RPSTR1 */
38};
39
40#define K3_UDMA_MAX_RFLOWS 1024
41#define K3_UDMA_DEFAULT_RING_SIZE 16
42
43/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44#define UDMA_RFLOW_SRCTAG_NONE 0
45#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
46#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
47#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
48
49#define UDMA_RFLOW_DSTTAG_NONE 0
50#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
51#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
52#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
53#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
54
55struct udma_chan;
56
57enum udma_mmr {
58 MMR_GCFG = 0,
59 MMR_RCHANRT,
60 MMR_TCHANRT,
61 MMR_LAST,
62};
63
64static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
65
66struct udma_tchan {
67 void __iomem *reg_rt;
68
69 int id;
70 struct k3_ring *t_ring; /* Transmit ring */
71 struct k3_ring *tc_ring; /* Transmit Completion ring */
72};
73
74struct udma_rflow {
75 int id;
76 struct k3_ring *fd_ring; /* Free Descriptor ring */
77 struct k3_ring *r_ring; /* Receive ring */
78};
79
80struct udma_rchan {
81 void __iomem *reg_rt;
82
83 int id;
84};
85
86#define UDMA_FLAG_PDMA_ACC32 BIT(0)
87#define UDMA_FLAG_PDMA_BURST BIT(1)
88
89struct udma_match_data {
90 u32 psil_base;
91 bool enable_memcpy_support;
92 u32 flags;
93 u32 statictr_z_mask;
94 u32 rchan_oes_offset;
95
96 u8 tpl_levels;
97 u32 level_start_idx[];
98};
99
100struct udma_hwdesc {
101 size_t cppi5_desc_size;
102 void *cppi5_desc_vaddr;
103 dma_addr_t cppi5_desc_paddr;
104
105 /* TR descriptor internal pointers */
106 void *tr_req_base;
107 struct cppi5_tr_resp_t *tr_resp_base;
108};
109
110struct udma_rx_flush {
111 struct udma_hwdesc hwdescs[2];
112
113 size_t buffer_size;
114 void *buffer_vaddr;
115 dma_addr_t buffer_paddr;
116};
117
118struct udma_dev {
119 struct dma_device ddev;
120 struct device *dev;
121 void __iomem *mmrs[MMR_LAST];
122 const struct udma_match_data *match_data;
123
124 size_t desc_align; /* alignment to use for descriptors */
125
126 struct udma_tisci_rm tisci_rm;
127
128 struct k3_ringacc *ringacc;
129
130 struct work_struct purge_work;
131 struct list_head desc_to_purge;
132 spinlock_t lock;
133
134 struct udma_rx_flush rx_flush;
135
136 int tchan_cnt;
137 int echan_cnt;
138 int rchan_cnt;
139 int rflow_cnt;
140 unsigned long *tchan_map;
141 unsigned long *rchan_map;
142 unsigned long *rflow_gp_map;
143 unsigned long *rflow_gp_map_allocated;
144 unsigned long *rflow_in_use;
145
146 struct udma_tchan *tchans;
147 struct udma_rchan *rchans;
148 struct udma_rflow *rflows;
149
150 struct udma_chan *channels;
151 u32 psil_base;
152 u32 atype;
153};
154
155struct udma_desc {
156 struct virt_dma_desc vd;
157
158 bool terminated;
159
160 enum dma_transfer_direction dir;
161
162 struct udma_static_tr static_tr;
163 u32 residue;
164
165 unsigned int sglen;
166 unsigned int desc_idx; /* Only used for cyclic in packet mode */
167 unsigned int tr_idx;
168
169 u32 metadata_size;
170 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
171
172 unsigned int hwdesc_count;
173 struct udma_hwdesc hwdesc[0];
174};
175
176enum udma_chan_state {
177 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
178 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
179 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
180};
181
182struct udma_tx_drain {
183 struct delayed_work work;
184 ktime_t tstamp;
185 u32 residue;
186};
187
188struct udma_chan_config {
189 bool pkt_mode; /* TR or packet */
190 bool needs_epib; /* EPIB is needed for the communication or not */
191 u32 psd_size; /* size of Protocol Specific Data */
192 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
193 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
194 bool notdpkt; /* Suppress sending TDC packet */
195 int remote_thread_id;
196 u32 atype;
197 u32 src_thread;
198 u32 dst_thread;
199 enum psil_endpoint_type ep_type;
200 bool enable_acc32;
201 bool enable_burst;
202 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
203
204 enum dma_transfer_direction dir;
205};
206
207struct udma_chan {
208 struct virt_dma_chan vc;
209 struct dma_slave_config cfg;
210 struct udma_dev *ud;
211 struct udma_desc *desc;
212 struct udma_desc *terminated_desc;
213 struct udma_static_tr static_tr;
214 char *name;
215
216 struct udma_tchan *tchan;
217 struct udma_rchan *rchan;
218 struct udma_rflow *rflow;
219
220 bool psil_paired;
221
222 int irq_num_ring;
223 int irq_num_udma;
224
225 bool cyclic;
226 bool paused;
227
228 enum udma_chan_state state;
229 struct completion teardown_completed;
230
231 struct udma_tx_drain tx_drain;
232
233 u32 bcnt; /* number of bytes completed since the start of the channel */
234
235 /* Channel configuration parameters */
236 struct udma_chan_config config;
237
238 /* dmapool for packet mode descriptors */
239 bool use_dma_pool;
240 struct dma_pool *hdesc_pool;
241
242 u32 id;
243};
244
245static inline struct udma_dev *to_udma_dev(struct dma_device *d)
246{
247 return container_of(d, struct udma_dev, ddev);
248}
249
250static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
251{
252 return container_of(c, struct udma_chan, vc.chan);
253}
254
255static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
256{
257 return container_of(t, struct udma_desc, vd.tx);
258}
259
260/* Generic register access functions */
261static inline u32 udma_read(void __iomem *base, int reg)
262{
263 return readl(base + reg);
264}
265
266static inline void udma_write(void __iomem *base, int reg, u32 val)
267{
268 writel(val, base + reg);
269}
270
271static inline void udma_update_bits(void __iomem *base, int reg,
272 u32 mask, u32 val)
273{
274 u32 tmp, orig;
275
276 orig = readl(base + reg);
277 tmp = orig & ~mask;
278 tmp |= (val & mask);
279
280 if (tmp != orig)
281 writel(tmp, base + reg);
282}
283
284/* TCHANRT */
285static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
286{
287 if (!tchan)
288 return 0;
289 return udma_read(tchan->reg_rt, reg);
290}
291
292static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
293 u32 val)
294{
295 if (!tchan)
296 return;
297 udma_write(tchan->reg_rt, reg, val);
298}
299
300static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
301 u32 mask, u32 val)
302{
303 if (!tchan)
304 return;
305 udma_update_bits(tchan->reg_rt, reg, mask, val);
306}
307
308/* RCHANRT */
309static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
310{
311 if (!rchan)
312 return 0;
313 return udma_read(rchan->reg_rt, reg);
314}
315
316static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
317 u32 val)
318{
319 if (!rchan)
320 return;
321 udma_write(rchan->reg_rt, reg, val);
322}
323
324static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
325 u32 mask, u32 val)
326{
327 if (!rchan)
328 return;
329 udma_update_bits(rchan->reg_rt, reg, mask, val);
330}
331
332static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
333{
334 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
335
336 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
337 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
338 tisci_rm->tisci_navss_dev_id,
339 src_thread, dst_thread);
340}
341
342static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
343 u32 dst_thread)
344{
345 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
346
347 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
348 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
349 tisci_rm->tisci_navss_dev_id,
350 src_thread, dst_thread);
351}
352
353static void udma_reset_uchan(struct udma_chan *uc)
354{
355 memset(&uc->config, 0, sizeof(uc->config));
356 uc->config.remote_thread_id = -1;
357 uc->state = UDMA_CHAN_IS_IDLE;
358}
359
360static void udma_dump_chan_stdata(struct udma_chan *uc)
361{
362 struct device *dev = uc->ud->dev;
363 u32 offset;
364 int i;
365
366 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
367 dev_dbg(dev, "TCHAN State data:\n");
368 for (i = 0; i < 32; i++) {
369 offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
370 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
371 udma_tchanrt_read(uc->tchan, offset));
372 }
373 }
374
375 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
376 dev_dbg(dev, "RCHAN State data:\n");
377 for (i = 0; i < 32; i++) {
378 offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
379 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
380 udma_rchanrt_read(uc->rchan, offset));
381 }
382 }
383}
384
385static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
386 int idx)
387{
388 return d->hwdesc[idx].cppi5_desc_paddr;
389}
390
391static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
392{
393 return d->hwdesc[idx].cppi5_desc_vaddr;
394}
395
396static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
397 dma_addr_t paddr)
398{
399 struct udma_desc *d = uc->terminated_desc;
400
401 if (d) {
402 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
403 d->desc_idx);
404
405 if (desc_paddr != paddr)
406 d = NULL;
407 }
408
409 if (!d) {
410 d = uc->desc;
411 if (d) {
412 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
413 d->desc_idx);
414
415 if (desc_paddr != paddr)
416 d = NULL;
417 }
418 }
419
420 return d;
421}
422
423static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
424{
425 if (uc->use_dma_pool) {
426 int i;
427
428 for (i = 0; i < d->hwdesc_count; i++) {
429 if (!d->hwdesc[i].cppi5_desc_vaddr)
430 continue;
431
432 dma_pool_free(uc->hdesc_pool,
433 d->hwdesc[i].cppi5_desc_vaddr,
434 d->hwdesc[i].cppi5_desc_paddr);
435
436 d->hwdesc[i].cppi5_desc_vaddr = NULL;
437 }
438 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
439 struct udma_dev *ud = uc->ud;
440
441 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
442 d->hwdesc[0].cppi5_desc_vaddr,
443 d->hwdesc[0].cppi5_desc_paddr);
444
445 d->hwdesc[0].cppi5_desc_vaddr = NULL;
446 }
447}
448
449static void udma_purge_desc_work(struct work_struct *work)
450{
451 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
452 struct virt_dma_desc *vd, *_vd;
453 unsigned long flags;
454 LIST_HEAD(head);
455
456 spin_lock_irqsave(&ud->lock, flags);
457 list_splice_tail_init(&ud->desc_to_purge, &head);
458 spin_unlock_irqrestore(&ud->lock, flags);
459
460 list_for_each_entry_safe(vd, _vd, &head, node) {
461 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
462 struct udma_desc *d = to_udma_desc(&vd->tx);
463
464 udma_free_hwdesc(uc, d);
465 list_del(&vd->node);
466 kfree(d);
467 }
468
469 /* If more to purge, schedule the work again */
470 if (!list_empty(&ud->desc_to_purge))
471 schedule_work(&ud->purge_work);
472}
473
474static void udma_desc_free(struct virt_dma_desc *vd)
475{
476 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
477 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
478 struct udma_desc *d = to_udma_desc(&vd->tx);
479 unsigned long flags;
480
481 if (uc->terminated_desc == d)
482 uc->terminated_desc = NULL;
483
484 if (uc->use_dma_pool) {
485 udma_free_hwdesc(uc, d);
486 kfree(d);
487 return;
488 }
489
490 spin_lock_irqsave(&ud->lock, flags);
491 list_add_tail(&vd->node, &ud->desc_to_purge);
492 spin_unlock_irqrestore(&ud->lock, flags);
493
494 schedule_work(&ud->purge_work);
495}
496
497static bool udma_is_chan_running(struct udma_chan *uc)
498{
499 u32 trt_ctl = 0;
500 u32 rrt_ctl = 0;
501
502 if (uc->tchan)
503 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
504 if (uc->rchan)
505 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
506
507 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
508 return true;
509
510 return false;
511}
512
513static bool udma_is_chan_paused(struct udma_chan *uc)
514{
515 u32 val, pause_mask;
516
517 switch (uc->config.dir) {
518 case DMA_DEV_TO_MEM:
519 val = udma_rchanrt_read(uc->rchan,
520 UDMA_RCHAN_RT_PEER_RT_EN_REG);
521 pause_mask = UDMA_PEER_RT_EN_PAUSE;
522 break;
523 case DMA_MEM_TO_DEV:
524 val = udma_tchanrt_read(uc->tchan,
525 UDMA_TCHAN_RT_PEER_RT_EN_REG);
526 pause_mask = UDMA_PEER_RT_EN_PAUSE;
527 break;
528 case DMA_MEM_TO_MEM:
529 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
530 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
531 break;
532 default:
533 return false;
534 }
535
536 if (val & pause_mask)
537 return true;
538
539 return false;
540}
541
542static void udma_sync_for_device(struct udma_chan *uc, int idx)
543{
544 struct udma_desc *d = uc->desc;
545
546 if (uc->cyclic && uc->config.pkt_mode) {
547 dma_sync_single_for_device(uc->ud->dev,
548 d->hwdesc[idx].cppi5_desc_paddr,
549 d->hwdesc[idx].cppi5_desc_size,
550 DMA_TO_DEVICE);
551 } else {
552 int i;
553
554 for (i = 0; i < d->hwdesc_count; i++) {
555 if (!d->hwdesc[i].cppi5_desc_vaddr)
556 continue;
557
558 dma_sync_single_for_device(uc->ud->dev,
559 d->hwdesc[i].cppi5_desc_paddr,
560 d->hwdesc[i].cppi5_desc_size,
561 DMA_TO_DEVICE);
562 }
563 }
564}
565
566static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
567{
568 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
569}
570
571static int udma_push_to_ring(struct udma_chan *uc, int idx)
572{
573 struct udma_desc *d = uc->desc;
574 struct k3_ring *ring = NULL;
575 dma_addr_t paddr;
576
577 switch (uc->config.dir) {
578 case DMA_DEV_TO_MEM:
579 ring = uc->rflow->fd_ring;
580 break;
581 case DMA_MEM_TO_DEV:
582 case DMA_MEM_TO_MEM:
583 ring = uc->tchan->t_ring;
584 break;
585 default:
586 return -EINVAL;
587 }
588
589 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
590 if (idx == -1) {
591 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
592 } else {
593 paddr = udma_curr_cppi5_desc_paddr(d, idx);
594
595 wmb(); /* Ensure that writes are not moved over this point */
596 udma_sync_for_device(uc, idx);
597 }
598
599 return k3_ringacc_ring_push(ring, &paddr);
600}
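/*
 * Typical uses of udma_push_to_ring(), as seen later in this file: the normal
 * data path pushes a descriptor of the current udma_desc by index
 * (udma_start_desc(), udma_cyclic_packet_elapsed()), while udma_stop() can
 * pass idx == -1 for a DEV_TO_MEM channel to queue the dedicated RX flush
 * descriptor so that the teardown has something to drain into.
 */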
601
602static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
603{
604 if (uc->config.dir != DMA_DEV_TO_MEM)
605 return false;
606
607 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
608 return true;
609
610 return false;
611}
612
613static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
614{
615 struct k3_ring *ring = NULL;
616 int ret = -ENOENT;
617
618 switch (uc->config.dir) {
619 case DMA_DEV_TO_MEM:
620 ring = uc->rflow->r_ring;
621 break;
622 case DMA_MEM_TO_DEV:
623 case DMA_MEM_TO_MEM:
624 ring = uc->tchan->tc_ring;
625 break;
626 default:
627 break;
628 }
629
630 if (ring && k3_ringacc_ring_get_occ(ring)) {
631 struct udma_desc *d = NULL;
632
633 ret = k3_ringacc_ring_pop(ring, addr);
634 if (ret)
635 return ret;
636
637 /* Teardown completion */
638 if (cppi5_desc_is_tdcm(*addr))
639 return ret;
640
641 /* Check for flush descriptor */
642 if (udma_desc_is_rx_flush(uc, *addr))
643 return -ENOENT;
644
645 d = udma_udma_desc_from_paddr(uc, *addr);
646
647 if (d)
648 dma_sync_single_for_cpu(uc->ud->dev, *addr,
649 d->hwdesc[0].cppi5_desc_size,
650 DMA_FROM_DEVICE);
651 rmb(); /* Ensure that reads are not moved before this point */
652 }
653
654 return ret;
655}
656
657static void udma_reset_rings(struct udma_chan *uc)
658{
659 struct k3_ring *ring1 = NULL;
660 struct k3_ring *ring2 = NULL;
661
662 switch (uc->config.dir) {
663 case DMA_DEV_TO_MEM:
664 if (uc->rchan) {
665 ring1 = uc->rflow->fd_ring;
666 ring2 = uc->rflow->r_ring;
667 }
668 break;
669 case DMA_MEM_TO_DEV:
670 case DMA_MEM_TO_MEM:
671 if (uc->tchan) {
672 ring1 = uc->tchan->t_ring;
673 ring2 = uc->tchan->tc_ring;
674 }
675 break;
676 default:
677 break;
678 }
679
680 if (ring1)
681 k3_ringacc_ring_reset_dma(ring1,
682 k3_ringacc_ring_get_occ(ring1));
683 if (ring2)
684 k3_ringacc_ring_reset(ring2);
685
686 /* make sure we are not leaking memory by a stalled descriptor */
687 if (uc->terminated_desc) {
688 udma_desc_free(&uc->terminated_desc->vd);
689 uc->terminated_desc = NULL;
690 }
691}
692
693static void udma_reset_counters(struct udma_chan *uc)
694{
695 u32 val;
696
697 if (uc->tchan) {
698 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
699 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
700
701 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
702 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
703
704 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
705 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
706
707 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
708 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
709 }
710
711 if (uc->rchan) {
712 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
713 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
714
715 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
716 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
717
718 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
719 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
720
721 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
722 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
723 }
724
725 uc->bcnt = 0;
726}
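/*
 * The read-then-write-back pattern in udma_reset_counters() relies on the
 * real-time BCNT/SBCNT/PCNT/PEER_BCNT registers being "write to decrement":
 * writing back the value that was just read subtracts it from the running
 * counter, effectively zeroing it. This is an assumption inferred from how
 * the code uses the registers; the NAVSS/UDMA TRM is the authoritative
 * reference for the register behaviour.
 */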
727
728static int udma_reset_chan(struct udma_chan *uc, bool hard)
729{
730 switch (uc->config.dir) {
731 case DMA_DEV_TO_MEM:
732 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
733 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
734 break;
735 case DMA_MEM_TO_DEV:
736 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
737 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
738 break;
739 case DMA_MEM_TO_MEM:
740 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
741 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
742 break;
743 default:
744 return -EINVAL;
745 }
746
747 /* Reset all counters */
748 udma_reset_counters(uc);
749
750 /* Hard reset: re-initialize the channel to reset */
751 if (hard) {
752 struct udma_chan_config ucc_backup;
753 int ret;
754
755 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
756 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
757
758 /* restore the channel configuration */
759 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
760 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
761 if (ret)
762 return ret;
763
764 /*
765 * Setting forced teardown after forced reset helps recovering
766 * the rchan.
767 */
768 if (uc->config.dir == DMA_DEV_TO_MEM)
769 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
770 UDMA_CHAN_RT_CTL_EN |
771 UDMA_CHAN_RT_CTL_TDOWN |
772 UDMA_CHAN_RT_CTL_FTDOWN);
773 }
774 uc->state = UDMA_CHAN_IS_IDLE;
775
776 return 0;
777}
778
779static void udma_start_desc(struct udma_chan *uc)
780{
781 struct udma_chan_config *ucc = &uc->config;
782
783 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
784 int i;
785
786 /* Push all descriptors to ring for packet mode cyclic or RX */
787 for (i = 0; i < uc->desc->sglen; i++)
788 udma_push_to_ring(uc, i);
789 } else {
790 udma_push_to_ring(uc, 0);
791 }
792}
793
794static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
795{
796 /* Only PDMAs have staticTR */
797 if (uc->config.ep_type == PSIL_EP_NATIVE)
798 return false;
799
800 /* Check if the staticTR configuration has changed for TX */
801 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
802 return true;
803
804 return false;
805}
806
807static int udma_start(struct udma_chan *uc)
808{
809 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
810
811 if (!vd) {
812 uc->desc = NULL;
813 return -ENOENT;
814 }
815
816 list_del(&vd->node);
817
818 uc->desc = to_udma_desc(&vd->tx);
819
820 /* Channel is already running and does not need reconfiguration */
821 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
822 udma_start_desc(uc);
823 goto out;
824 }
825
826 /* Make sure that we clear the teardown bit, if it is set */
827 udma_reset_chan(uc, false);
828
829 /* Push descriptors before we start the channel */
830 udma_start_desc(uc);
831
832 switch (uc->desc->dir) {
833 case DMA_DEV_TO_MEM:
834 /* Config remote TR */
835 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
836 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
837 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
838 const struct udma_match_data *match_data =
839 uc->ud->match_data;
840
841 if (uc->config.enable_acc32)
842 val |= PDMA_STATIC_TR_XY_ACC32;
843 if (uc->config.enable_burst)
844 val |= PDMA_STATIC_TR_XY_BURST;
845
846 udma_rchanrt_write(uc->rchan,
847 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
848
849 udma_rchanrt_write(uc->rchan,
850 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
851 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
852 match_data->statictr_z_mask));
853
854 /* save the current staticTR configuration */
855 memcpy(&uc->static_tr, &uc->desc->static_tr,
856 sizeof(uc->static_tr));
857 }
858
859 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
860 UDMA_CHAN_RT_CTL_EN);
861
862 /* Enable remote */
863 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
864 UDMA_PEER_RT_EN_ENABLE);
865
866 break;
867 case DMA_MEM_TO_DEV:
868 /* Config remote TR */
869 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
870 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
871 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
872
873 if (uc->config.enable_acc32)
874 val |= PDMA_STATIC_TR_XY_ACC32;
875 if (uc->config.enable_burst)
876 val |= PDMA_STATIC_TR_XY_BURST;
877
878 udma_tchanrt_write(uc->tchan,
879 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
880
881 /* save the current staticTR configuration */
882 memcpy(&uc->static_tr, &uc->desc->static_tr,
883 sizeof(uc->static_tr));
884 }
885
886 /* Enable remote */
887 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
888 UDMA_PEER_RT_EN_ENABLE);
889
890 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
891 UDMA_CHAN_RT_CTL_EN);
892
893 break;
894 case DMA_MEM_TO_MEM:
895 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
896 UDMA_CHAN_RT_CTL_EN);
897 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
898 UDMA_CHAN_RT_CTL_EN);
899
900 break;
901 default:
902 return -EINVAL;
903 }
904
905 uc->state = UDMA_CHAN_IS_ACTIVE;
906out:
907
908 return 0;
909}
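/*
 * Note the enable order in udma_start(): for DEV_TO_MEM the local rchan is
 * enabled before the remote peer, while for MEM_TO_DEV the remote peer is
 * enabled before the local tchan. In both cases the receiving end of the
 * PSI-L link appears to be brought up first, likely so that no data is
 * produced before the consumer is ready to accept it.
 */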
910
911static int udma_stop(struct udma_chan *uc)
912{
913 enum udma_chan_state old_state = uc->state;
914
915 uc->state = UDMA_CHAN_IS_TERMINATING;
916 reinit_completion(&uc->teardown_completed);
917
918 switch (uc->config.dir) {
919 case DMA_DEV_TO_MEM:
920 if (!uc->cyclic && !uc->desc)
921 udma_push_to_ring(uc, -1);
922
923 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
924 UDMA_PEER_RT_EN_ENABLE |
925 UDMA_PEER_RT_EN_TEARDOWN);
926 break;
927 case DMA_MEM_TO_DEV:
928 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
929 UDMA_PEER_RT_EN_ENABLE |
930 UDMA_PEER_RT_EN_FLUSH);
931 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
932 UDMA_CHAN_RT_CTL_EN |
933 UDMA_CHAN_RT_CTL_TDOWN);
934 break;
935 case DMA_MEM_TO_MEM:
936 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
937 UDMA_CHAN_RT_CTL_EN |
938 UDMA_CHAN_RT_CTL_TDOWN);
939 break;
940 default:
941 uc->state = old_state;
942 complete_all(&uc->teardown_completed);
943 return -EINVAL;
944 }
945
946 return 0;
947}
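/*
 * Teardown summary for udma_stop(), as implemented above:
 * - DEV_TO_MEM: optionally queue the RX flush descriptor, then request
 *   teardown through the peer real-time enable register.
 * - MEM_TO_DEV: flush the peer and set the TDOWN bit on the local tchan.
 * - MEM_TO_MEM: set the TDOWN bit on the local tchan only.
 * Completion of the teardown is signalled by a TDCM popped from the ring
 * (see udma_ring_irq_handler()), which completes teardown_completed.
 */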
948
949static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
950{
951 struct udma_desc *d = uc->desc;
952 struct cppi5_host_desc_t *h_desc;
953
954 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
955 cppi5_hdesc_reset_to_original(h_desc);
956 udma_push_to_ring(uc, d->desc_idx);
957 d->desc_idx = (d->desc_idx + 1) % d->sglen;
958}
959
960static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
961{
962 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
963
964 memcpy(d->metadata, h_desc->epib, d->metadata_size);
965}
966
967static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
968{
969 u32 peer_bcnt, bcnt;
970
971 /* Only TX towards PDMA is affected */
972 if (uc->config.ep_type == PSIL_EP_NATIVE ||
973 uc->config.dir != DMA_MEM_TO_DEV)
974 return true;
975
976 peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
977 bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
978
979 /* Transfer is incomplete, store current residue and time stamp */
980 if (peer_bcnt < bcnt) {
981 uc->tx_drain.residue = bcnt - peer_bcnt;
982 uc->tx_drain.tstamp = ktime_get();
983 return false;
984 }
985
986 return true;
987}
988
989static void udma_check_tx_completion(struct work_struct *work)
990{
991 struct udma_chan *uc = container_of(work, typeof(*uc),
992 tx_drain.work.work);
993 bool desc_done = true;
994 u32 residue_diff;
995 ktime_t time_diff;
996 unsigned long delay;
997
998 while (1) {
999 if (uc->desc) {
1000 /* Get previous residue and time stamp */
1001 residue_diff = uc->tx_drain.residue;
1002 time_diff = uc->tx_drain.tstamp;
1003 /*
1004 * Get current residue and time stamp or see if
1005 * transfer is complete
1006 */
1007 desc_done = udma_is_desc_really_done(uc, uc->desc);
1008 }
1009
1010 if (!desc_done) {
1011 /*
1012 * Find the time delta and residue delta w.r.t
1013 * previous poll
1014 */
1015 time_diff = ktime_sub(uc->tx_drain.tstamp,
1016 time_diff) + 1;
1017 residue_diff -= uc->tx_drain.residue;
1018 if (residue_diff) {
1019 /*
1020 * Try to guess when we should check
1021 * next time by calculating rate at
1022 * which data is being drained at the
1023 * peer device
1024 */
1025 delay = (time_diff / residue_diff) *
1026 uc->tx_drain.residue;
1027 } else {
1028 /* No progress, check again in 1 second */
1029 schedule_delayed_work(&uc->tx_drain.work, HZ);
1030 break;
1031 }
1032
1033 usleep_range(ktime_to_us(delay),
1034 ktime_to_us(delay) + 10);
1035 continue;
1036 }
1037
1038 if (uc->desc) {
1039 struct udma_desc *d = uc->desc;
1040
1041 uc->bcnt += d->residue;
1042 udma_start(uc);
1043 vchan_cookie_complete(&d->vd);
1044 break;
1045 }
1046
1047 break;
1048 }
1049}
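/*
 * Worked example for the delay estimate above (hypothetical numbers, for
 * illustration only): if the previous poll saw a residue of 4096 bytes and
 * the current poll, ~1 ms later, sees 1024 bytes, then residue_diff is 3072
 * bytes drained in ~1 ms. The next check is therefore delayed by roughly
 * (1 ms / 3072) * 1024 = ~333 us, i.e. about the time the peer should need
 * to drain the remaining bytes at the observed rate.
 */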
1050
1051static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1052{
1053 struct udma_chan *uc = data;
1054 struct udma_desc *d;
1055 unsigned long flags;
1056 dma_addr_t paddr = 0;
1057
1058 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1059 return IRQ_HANDLED;
1060
1061 spin_lock_irqsave(&uc->vc.lock, flags);
1062
1063 /* Teardown completion message */
1064 if (cppi5_desc_is_tdcm(paddr)) {
1065 complete_all(&uc->teardown_completed);
1066
1067 if (uc->terminated_desc) {
1068 udma_desc_free(&uc->terminated_desc->vd);
1069 uc->terminated_desc = NULL;
1070 }
1071
1072 if (!uc->desc)
1073 udma_start(uc);
1074
1075 goto out;
1076 }
1077
1078 d = udma_udma_desc_from_paddr(uc, paddr);
1079
1080 if (d) {
1081 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1082 d->desc_idx);
1083 if (desc_paddr != paddr) {
1084 dev_err(uc->ud->dev, "not matching descriptors!\n");
1085 goto out;
1086 }
1087
1088 if (d == uc->desc) {
1089 /* active descriptor */
1090 if (uc->cyclic) {
1091 udma_cyclic_packet_elapsed(uc);
1092 vchan_cyclic_callback(&d->vd);
1093 } else {
1094 if (udma_is_desc_really_done(uc, d)) {
1095 uc->bcnt += d->residue;
1096 udma_start(uc);
1097 vchan_cookie_complete(&d->vd);
1098 } else {
1099 schedule_delayed_work(&uc->tx_drain.work,
1100 0);
1101 }
1102 }
1103 } else {
1104 /*
1105 * terminated descriptor, mark the descriptor as
1106 * completed to update the channel's cookie marker
1107 */
1108 dma_cookie_complete(&d->vd.tx);
1109 }
1110 }
1111out:
1112 spin_unlock_irqrestore(&uc->vc.lock, flags);
1113
1114 return IRQ_HANDLED;
1115}
1116
1117static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1118{
1119 struct udma_chan *uc = data;
1120 struct udma_desc *d;
1121 unsigned long flags;
1122
1123 spin_lock_irqsave(&uc->vc.lock, flags);
1124 d = uc->desc;
1125 if (d) {
1126 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1127
1128 if (uc->cyclic) {
1129 vchan_cyclic_callback(&d->vd);
1130 } else {
1131 /* TODO: figure out the real amount of data */
1132 uc->bcnt += d->residue;
1133 udma_start(uc);
1134 vchan_cookie_complete(&d->vd);
1135 }
1136 }
1137
1138 spin_unlock_irqrestore(&uc->vc.lock, flags);
1139
1140 return IRQ_HANDLED;
1141}
1142
1143/**
1144 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1145 * @ud: UDMA device
1146 * @from: Start the search from this flow id number
1147 * @cnt: Number of consecutive flow ids to allocate
1148 *
1149 * Allocate a range of RX flow ids for future use. Those flows can be requested
1150 * only by explicit flow id number. If @from is set to -1 it will try to find the
1151 * first free range. If @from is a positive value it will force allocation only
1152 * of the specified range of flows.
1153 *
1154 * Returns -ENOMEM if it can't find a free range.
1155 * -EEXIST if the requested range is busy.
1156 * -EINVAL if wrong input values are passed.
1157 * Returns the flow id on success.
1158 */
1159static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1160{
1161 int start, tmp_from;
1162 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1163
1164 tmp_from = from;
1165 if (tmp_from < 0)
1166 tmp_from = ud->rchan_cnt;
1167 /* default flows can't be allocated and accessible only by id */
1168 if (tmp_from < ud->rchan_cnt)
1169 return -EINVAL;
1170
1171 if (tmp_from + cnt > ud->rflow_cnt)
1172 return -EINVAL;
1173
1174 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1175 ud->rflow_cnt);
1176
1177 start = bitmap_find_next_zero_area(tmp,
1178 ud->rflow_cnt,
1179 tmp_from, cnt, 0);
1180 if (start >= ud->rflow_cnt)
1181 return -ENOMEM;
1182
1183 if (from >= 0 && start != from)
1184 return -EEXIST;
1185
1186 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1187 return start;
1188}
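/*
 * Hypothetical example (numbers are illustrative, not taken from a real SoC):
 * with rchan_cnt = 32 and rflow_cnt = 96, flows 0-31 are the default per-rchan
 * flows and cannot be allocated here. A call with from = -1, cnt = 4 searches
 * the GP range 32-95 and returns the first free block of 4 flow ids; a call
 * with from = 40, cnt = 4 either reserves exactly flows 40-43 or fails with
 * -EEXIST if any of them is already in use.
 */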
1189
1190static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1191{
1192 if (from < ud->rchan_cnt)
1193 return -EINVAL;
1194 if (from + cnt > ud->rflow_cnt)
1195 return -EINVAL;
1196
1197 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1198 return 0;
1199}
1200
1201static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1202{
1203 /*
1204 * An attempt to request an rflow by ID can be made for any rflow that is
1205 * not in use, with the assumption that the caller knows what it is doing.
1206 * TI-SCI FW will perform an additional permission check anyway, so it's
1207 * safe.
1208 */
1209
1210 if (id < 0 || id >= ud->rflow_cnt)
1211 return ERR_PTR(-ENOENT);
1212
1213 if (test_bit(id, ud->rflow_in_use))
1214 return ERR_PTR(-ENOENT);
1215
1216 /* GP rflow has to be allocated first */
1217 if (!test_bit(id, ud->rflow_gp_map) &&
1218 !test_bit(id, ud->rflow_gp_map_allocated))
1219 return ERR_PTR(-EINVAL);
1220
1221 dev_dbg(ud->dev, "get rflow%d\n", id);
1222 set_bit(id, ud->rflow_in_use);
1223 return &ud->rflows[id];
1224}
1225
1226static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1227{
1228 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1229 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1230 return;
1231 }
1232
1233 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1234 clear_bit(rflow->id, ud->rflow_in_use);
1235}
1236
1237#define UDMA_RESERVE_RESOURCE(res) \
1238static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1239 enum udma_tp_level tpl, \
1240 int id) \
1241{ \
1242 if (id >= 0) { \
1243 if (test_bit(id, ud->res##_map)) { \
1244 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1245 return ERR_PTR(-ENOENT); \
1246 } \
1247 } else { \
1248 int start; \
1249 \
1250 if (tpl >= ud->match_data->tpl_levels) \
1251 tpl = ud->match_data->tpl_levels - 1; \
1252 \
1253 start = ud->match_data->level_start_idx[tpl]; \
1254 \
1255 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1256 start); \
1257 if (id == ud->res##_cnt) { \
1258 return ERR_PTR(-ENOENT); \
1259 } \
1260 } \
1261 \
1262 set_bit(id, ud->res##_map); \
1263 return &ud->res##s[id]; \
1264}
1265
1266UDMA_RESERVE_RESOURCE(tchan);
1267UDMA_RESERVE_RESOURCE(rchan);
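/*
 * The two macro invocations above expand to __udma_reserve_tchan() and
 * __udma_reserve_rchan(), which are used by udma_get_tchan() and
 * udma_get_rchan() below. The ## token pasting also selects the matching
 * tchan_map/rchan_map bitmap and tchans[]/rchans[] array in struct udma_dev.
 */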
1268
1269static int udma_get_tchan(struct udma_chan *uc)
1270{
1271 struct udma_dev *ud = uc->ud;
1272
1273 if (uc->tchan) {
1274 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1275 uc->id, uc->tchan->id);
1276 return 0;
1277 }
1278
1279 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1280
1281 return PTR_ERR_OR_ZERO(uc->tchan);
1282}
1283
1284static int udma_get_rchan(struct udma_chan *uc)
1285{
1286 struct udma_dev *ud = uc->ud;
1287
1288 if (uc->rchan) {
1289 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1290 uc->id, uc->rchan->id);
1291 return 0;
1292 }
1293
1294 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1295
1296 return PTR_ERR_OR_ZERO(uc->rchan);
1297}
1298
1299static int udma_get_chan_pair(struct udma_chan *uc)
1300{
1301 struct udma_dev *ud = uc->ud;
1302 const struct udma_match_data *match_data = ud->match_data;
1303 int chan_id, end;
1304
1305 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1306 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1307 uc->id, uc->tchan->id);
1308 return 0;
1309 }
1310
1311 if (uc->tchan) {
1312 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1313 uc->id, uc->tchan->id);
1314 return -EBUSY;
1315 } else if (uc->rchan) {
1316 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1317 uc->id, uc->rchan->id);
1318 return -EBUSY;
1319 }
1320
1321 /* Can be optimized, but let's have it like this for now */
1322 end = min(ud->tchan_cnt, ud->rchan_cnt);
1323 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1324 chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
1325 for (; chan_id < end; chan_id++) {
1326 if (!test_bit(chan_id, ud->tchan_map) &&
1327 !test_bit(chan_id, ud->rchan_map))
1328 break;
1329 }
1330
1331 if (chan_id == end)
1332 return -ENOENT;
1333
1334 set_bit(chan_id, ud->tchan_map);
1335 set_bit(chan_id, ud->rchan_map);
1336 uc->tchan = &ud->tchans[chan_id];
1337 uc->rchan = &ud->rchans[chan_id];
1338
1339 return 0;
1340}
1341
1342static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1343{
1344 struct udma_dev *ud = uc->ud;
1345
1346 if (!uc->rchan) {
1347 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1348 return -EINVAL;
1349 }
1350
1351 if (uc->rflow) {
1352 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1353 uc->id, uc->rflow->id);
1354 return 0;
1355 }
1356
1357 uc->rflow = __udma_get_rflow(ud, flow_id);
1358
1359 return PTR_ERR_OR_ZERO(uc->rflow);
1360}
1361
1362static void udma_put_rchan(struct udma_chan *uc)
1363{
1364 struct udma_dev *ud = uc->ud;
1365
1366 if (uc->rchan) {
1367 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1368 uc->rchan->id);
1369 clear_bit(uc->rchan->id, ud->rchan_map);
1370 uc->rchan = NULL;
1371 }
1372}
1373
1374static void udma_put_tchan(struct udma_chan *uc)
1375{
1376 struct udma_dev *ud = uc->ud;
1377
1378 if (uc->tchan) {
1379 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1380 uc->tchan->id);
1381 clear_bit(uc->tchan->id, ud->tchan_map);
1382 uc->tchan = NULL;
1383 }
1384}
1385
1386static void udma_put_rflow(struct udma_chan *uc)
1387{
1388 struct udma_dev *ud = uc->ud;
1389
1390 if (uc->rflow) {
1391 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1392 uc->rflow->id);
1393 __udma_put_rflow(ud, uc->rflow);
1394 uc->rflow = NULL;
1395 }
1396}
1397
1398static void udma_free_tx_resources(struct udma_chan *uc)
1399{
1400 if (!uc->tchan)
1401 return;
1402
1403 k3_ringacc_ring_free(uc->tchan->t_ring);
1404 k3_ringacc_ring_free(uc->tchan->tc_ring);
1405 uc->tchan->t_ring = NULL;
1406 uc->tchan->tc_ring = NULL;
1407
1408 udma_put_tchan(uc);
1409}
1410
1411static int udma_alloc_tx_resources(struct udma_chan *uc)
1412{
1413 struct k3_ring_cfg ring_cfg;
1414 struct udma_dev *ud = uc->ud;
1415 int ret;
1416
1417 ret = udma_get_tchan(uc);
1418 if (ret)
1419 return ret;
1420
1421 uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
1422 uc->tchan->id, 0);
1423 if (!uc->tchan->t_ring) {
1424 ret = -EBUSY;
1425 goto err_tx_ring;
1426 }
1427
1428 uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1429 if (!uc->tchan->tc_ring) {
1430 ret = -EBUSY;
1431 goto err_txc_ring;
1432 }
1433
1434 memset(&ring_cfg, 0, sizeof(ring_cfg));
1435 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1436 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1437 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1438
1439 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1440 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1441
1442 if (ret)
1443 goto err_ringcfg;
1444
1445 return 0;
1446
1447err_ringcfg:
1448 k3_ringacc_ring_free(uc->tchan->tc_ring);
1449 uc->tchan->tc_ring = NULL;
1450err_txc_ring:
1451 k3_ringacc_ring_free(uc->tchan->t_ring);
1452 uc->tchan->t_ring = NULL;
1453err_tx_ring:
1454 udma_put_tchan(uc);
1455
1456 return ret;
1457}
1458
1459static void udma_free_rx_resources(struct udma_chan *uc)
1460{
1461 if (!uc->rchan)
1462 return;
1463
1464 if (uc->rflow) {
1465 struct udma_rflow *rflow = uc->rflow;
1466
1467 k3_ringacc_ring_free(rflow->fd_ring);
1468 k3_ringacc_ring_free(rflow->r_ring);
1469 rflow->fd_ring = NULL;
1470 rflow->r_ring = NULL;
1471
1472 udma_put_rflow(uc);
1473 }
1474
1475 udma_put_rchan(uc);
1476}
1477
1478static int udma_alloc_rx_resources(struct udma_chan *uc)
1479{
1480 struct udma_dev *ud = uc->ud;
1481 struct k3_ring_cfg ring_cfg;
1482 struct udma_rflow *rflow;
1483 int fd_ring_id;
1484 int ret;
1485
1486 ret = udma_get_rchan(uc);
1487 if (ret)
1488 return ret;
1489
1490 /* For MEM_TO_MEM we don't need rflow or rings */
1491 if (uc->config.dir == DMA_MEM_TO_MEM)
1492 return 0;
1493
1494 ret = udma_get_rflow(uc, uc->rchan->id);
1495 if (ret) {
1496 ret = -EBUSY;
1497 goto err_rflow;
1498 }
1499
1500 rflow = uc->rflow;
1501 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1502 rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
1503 if (!rflow->fd_ring) {
1504 ret = -EBUSY;
1505 goto err_rx_ring;
1506 }
1507
1508 rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1509 if (!rflow->r_ring) {
1510 ret = -EBUSY;
1511 goto err_rxc_ring;
1512 }
1513
1514 memset(&ring_cfg, 0, sizeof(ring_cfg));
1515
1516 if (uc->config.pkt_mode)
1517 ring_cfg.size = SG_MAX_SEGMENTS;
1518 else
1519 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1520
1521 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1522 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1523
1524 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1525 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1526 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1527
1528 if (ret)
1529 goto err_ringcfg;
1530
1531 return 0;
1532
1533err_ringcfg:
1534 k3_ringacc_ring_free(rflow->r_ring);
1535 rflow->r_ring = NULL;
1536err_rxc_ring:
1537 k3_ringacc_ring_free(rflow->fd_ring);
1538 rflow->fd_ring = NULL;
1539err_rx_ring:
1540 udma_put_rflow(uc);
1541err_rflow:
1542 udma_put_rchan(uc);
1543
1544 return ret;
1545}
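/*
 * The fd_ring_id calculation above (tchan_cnt + echan_cnt + rchan->id)
 * appears to map the per-rflow free-descriptor ring into a global ring
 * numbering where the tchan rings come first, followed by the external
 * (echan) rings and then one ring per rchan. This layout is inferred from
 * the arithmetic here; the ring accelerator documentation describes the
 * exact mapping.
 */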
1546
1547#define TISCI_TCHAN_VALID_PARAMS ( \
1548 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1549 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1550 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1551 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1552 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1553 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1554 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1555 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1556
1557#define TISCI_RCHAN_VALID_PARAMS ( \
1558 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1559 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1560 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1561 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1562 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1563 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1564 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1565 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1566 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1567
1568static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1569{
1570 struct udma_dev *ud = uc->ud;
1571 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1572 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1573 struct udma_tchan *tchan = uc->tchan;
1574 struct udma_rchan *rchan = uc->rchan;
1575 int ret = 0;
1576
1577 /* Non synchronized - mem to mem type of transfer */
1578 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1579 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1580 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1581
1582 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1583 req_tx.nav_id = tisci_rm->tisci_dev_id;
1584 req_tx.index = tchan->id;
1585 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1586 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1587 req_tx.txcq_qnum = tc_ring;
1588 req_tx.tx_atype = ud->atype;
1589
1590 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1591 if (ret) {
1592 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1593 return ret;
1594 }
1595
1596 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1597 req_rx.nav_id = tisci_rm->tisci_dev_id;
1598 req_rx.index = rchan->id;
1599 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1600 req_rx.rxcq_qnum = tc_ring;
1601 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1602 req_rx.rx_atype = ud->atype;
1603
1604 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1605 if (ret)
1606 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1607
1608 return ret;
1609}
1610
1611static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1612{
1613 struct udma_dev *ud = uc->ud;
1614 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1615 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1616 struct udma_tchan *tchan = uc->tchan;
1617 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1618 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1619 u32 mode, fetch_size;
1620 int ret = 0;
1621
1622 if (uc->config.pkt_mode) {
1623 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1624 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1625 uc->config.psd_size, 0);
1626 } else {
1627 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1628 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1629 }
1630
1631 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1632 req_tx.nav_id = tisci_rm->tisci_dev_id;
1633 req_tx.index = tchan->id;
1634 req_tx.tx_chan_type = mode;
1635 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1636 req_tx.tx_fetch_size = fetch_size >> 2;
1637 req_tx.txcq_qnum = tc_ring;
1638 req_tx.tx_atype = uc->config.atype;
1639
1640 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1641 if (ret)
1642 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1643
1644 return ret;
1645}
1646
1647static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1648{
1649 struct udma_dev *ud = uc->ud;
1650 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1651 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1652 struct udma_rchan *rchan = uc->rchan;
1653 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1654 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1655 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1656 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1657 u32 mode, fetch_size;
1658 int ret = 0;
1659
1660 if (uc->config.pkt_mode) {
1661 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1662 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1663 uc->config.psd_size, 0);
1664 } else {
1665 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1666 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1667 }
1668
1669 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1670 req_rx.nav_id = tisci_rm->tisci_dev_id;
1671 req_rx.index = rchan->id;
1672 req_rx.rx_fetch_size = fetch_size >> 2;
1673 req_rx.rxcq_qnum = rx_ring;
1674 req_rx.rx_chan_type = mode;
1675 req_rx.rx_atype = uc->config.atype;
1676
1677 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1678 if (ret) {
1679 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1680 return ret;
1681 }
1682
1683 flow_req.valid_params =
1684 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1685 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1686 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1687 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1688 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1689 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1690 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1691 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1692 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1693 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1694 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1695 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1696 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1697
1698 flow_req.nav_id = tisci_rm->tisci_dev_id;
1699 flow_req.flow_index = rchan->id;
1700
1701 if (uc->config.needs_epib)
1702 flow_req.rx_einfo_present = 1;
1703 else
1704 flow_req.rx_einfo_present = 0;
1705 if (uc->config.psd_size)
1706 flow_req.rx_psinfo_present = 1;
1707 else
1708 flow_req.rx_psinfo_present = 0;
1709 flow_req.rx_error_handling = 1;
1710 flow_req.rx_dest_qnum = rx_ring;
1711 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1712 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1713 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1714 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1715 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1716 flow_req.rx_fdq1_qnum = fd_ring;
1717 flow_req.rx_fdq2_qnum = fd_ring;
1718 flow_req.rx_fdq3_qnum = fd_ring;
1719
1720 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1721
1722 if (ret)
1723 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1724
1725 return 0;
1726}
1727
1728static int udma_alloc_chan_resources(struct dma_chan *chan)
1729{
1730 struct udma_chan *uc = to_udma_chan(chan);
1731 struct udma_dev *ud = to_udma_dev(chan->device);
1732 const struct udma_match_data *match_data = ud->match_data;
1733 struct k3_ring *irq_ring;
1734 u32 irq_udma_idx;
1735 int ret;
1736
1737 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1738 uc->use_dma_pool = true;
1739 /* in case of MEM_TO_MEM we have maximum of two TRs */
1740 if (uc->config.dir == DMA_MEM_TO_MEM) {
1741 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1742 sizeof(struct cppi5_tr_type15_t), 2);
1743 uc->config.pkt_mode = false;
1744 }
1745 }
1746
1747 if (uc->use_dma_pool) {
1748 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1749 uc->config.hdesc_size,
1750 ud->desc_align,
1751 0);
1752 if (!uc->hdesc_pool) {
1753 dev_err(ud->ddev.dev,
1754 "Descriptor pool allocation failed\n");
1755 uc->use_dma_pool = false;
1756 return -ENOMEM;
1757 }
1758 }
1759
1760 /*
1761 * Make sure that the completion is in a known state:
1762 * No teardown, the channel is idle
1763 */
1764 reinit_completion(&uc->teardown_completed);
1765 complete_all(&uc->teardown_completed);
1766 uc->state = UDMA_CHAN_IS_IDLE;
1767
1768 switch (uc->config.dir) {
1769 case DMA_MEM_TO_MEM:
1770 /* Non synchronized - mem to mem type of transfer */
1771 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1772 uc->id);
1773
1774 ret = udma_get_chan_pair(uc);
1775 if (ret)
1776 return ret;
1777
1778 ret = udma_alloc_tx_resources(uc);
1779 if (ret)
1780 return ret;
1781
1782 ret = udma_alloc_rx_resources(uc);
1783 if (ret) {
1784 udma_free_tx_resources(uc);
1785 return ret;
1786 }
1787
1788 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1789 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1790 K3_PSIL_DST_THREAD_ID_OFFSET;
1791
1792 irq_ring = uc->tchan->tc_ring;
1793 irq_udma_idx = uc->tchan->id;
1794
1795 ret = udma_tisci_m2m_channel_config(uc);
1796 break;
1797 case DMA_MEM_TO_DEV:
1798 /* Slave transfer synchronized - mem to dev (TX) transfer */
1799 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1800 uc->id);
1801
1802 ret = udma_alloc_tx_resources(uc);
1803 if (ret) {
1804 uc->config.remote_thread_id = -1;
1805 return ret;
1806 }
1807
1808 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1809 uc->config.dst_thread = uc->config.remote_thread_id;
1810 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1811
1812 irq_ring = uc->tchan->tc_ring;
1813 irq_udma_idx = uc->tchan->id;
1814
1815 ret = udma_tisci_tx_channel_config(uc);
1816 break;
1817 case DMA_DEV_TO_MEM:
1818 /* Slave transfer synchronized - dev to mem (RX) transfer */
1819 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1820 uc->id);
1821
1822 ret = udma_alloc_rx_resources(uc);
1823 if (ret) {
1824 uc->config.remote_thread_id = -1;
1825 return ret;
1826 }
1827
1828 uc->config.src_thread = uc->config.remote_thread_id;
1829 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1830 K3_PSIL_DST_THREAD_ID_OFFSET;
1831
1832 irq_ring = uc->rflow->r_ring;
1833 irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1834
1835 ret = udma_tisci_rx_channel_config(uc);
1836 break;
1837 default:
1838 /* Can not happen */
1839 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1840 __func__, uc->id, uc->config.dir);
1841 return -EINVAL;
1842 }
1843
1844 /* check if the channel configuration was successful */
1845 if (ret)
1846 goto err_res_free;
1847
1848 if (udma_is_chan_running(uc)) {
1849 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1850 udma_stop(uc);
1851 if (udma_is_chan_running(uc)) {
1852 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1853 ret = -EBUSY;
1854 goto err_res_free;
1855 }
1856 }
1857
1858 /* PSI-L pairing */
1859 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1860 if (ret) {
1861 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1862 uc->config.src_thread, uc->config.dst_thread);
1863 goto err_res_free;
1864 }
1865
1866 uc->psil_paired = true;
1867
1868 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1869 if (uc->irq_num_ring <= 0) {
1870 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1871 k3_ringacc_get_ring_id(irq_ring));
1872 ret = -EINVAL;
1873 goto err_psi_free;
1874 }
1875
1876 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1877 IRQF_TRIGGER_HIGH, uc->name, uc);
1878 if (ret) {
1879 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1880 goto err_irq_free;
1881 }
1882
1883 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1884 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1885 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1886 irq_udma_idx);
1887 if (uc->irq_num_udma <= 0) {
1888 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1889 irq_udma_idx);
1890 free_irq(uc->irq_num_ring, uc);
1891 ret = -EINVAL;
1892 goto err_irq_free;
1893 }
1894
1895 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1896 uc->name, uc);
1897 if (ret) {
1898 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1899 uc->id);
1900 free_irq(uc->irq_num_ring, uc);
1901 goto err_irq_free;
1902 }
1903 } else {
1904 uc->irq_num_udma = 0;
1905 }
1906
1907 udma_reset_rings(uc);
1908
1909 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
1910 udma_check_tx_completion);
1911 return 0;
1912
1913err_irq_free:
1914 uc->irq_num_ring = 0;
1915 uc->irq_num_udma = 0;
1916err_psi_free:
1917 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1918 uc->psil_paired = false;
1919err_res_free:
1920 udma_free_tx_resources(uc);
1921 udma_free_rx_resources(uc);
1922
1923 udma_reset_uchan(uc);
1924
1925 if (uc->use_dma_pool) {
1926 dma_pool_destroy(uc->hdesc_pool);
1927 uc->use_dma_pool = false;
1928 }
1929
1930 return ret;
1931}
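/*
 * Channel bring-up order in udma_alloc_chan_resources(), for reference:
 * 1. reserve tchan/rchan/rflow and their rings for the requested direction;
 * 2. configure the channel(s) through TISCI (m2m, tx or rx variant);
 * 3. pair the PSI-L source and destination threads;
 * 4. request the ring completion IRQ and, for slave TR mode channels only,
 *    the UDMA TR event IRQ.
 * The error labels unwind these steps in reverse order.
 */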
1932
1933static int udma_slave_config(struct dma_chan *chan,
1934 struct dma_slave_config *cfg)
1935{
1936 struct udma_chan *uc = to_udma_chan(chan);
1937
1938 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1939
1940 return 0;
1941}
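/*
 * Illustrative only: a minimal sketch of how a hypothetical client driver
 * would reach udma_slave_config() through the generic dmaengine API. The
 * channel name, FIFO address and widths below are assumptions, not values
 * defined by this driver:
 *
 *	struct dma_slave_config cfg = { };
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *
 *	cfg.src_addr = fifo_phys_addr;
 *	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.src_maxburst = 8;
 *	dmaengine_slave_config(chan, &cfg);
 *
 * The parameters are only stored here; they take effect when a slave_sg or
 * cyclic descriptor is prepared for the channel.
 */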
1942
1943static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1944 size_t tr_size, int tr_count,
1945 enum dma_transfer_direction dir)
1946{
1947 struct udma_hwdesc *hwdesc;
1948 struct cppi5_desc_hdr_t *tr_desc;
1949 struct udma_desc *d;
1950 u32 reload_count = 0;
1951 u32 ring_id;
1952
1953 switch (tr_size) {
1954 case 16:
1955 case 32:
1956 case 64:
1957 case 128:
1958 break;
1959 default:
1960 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1961 return NULL;
1962 }
1963
1964 /* We have only one descriptor containing multiple TRs */
1965 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1966 if (!d)
1967 return NULL;
1968
1969 d->sglen = tr_count;
1970
1971 d->hwdesc_count = 1;
1972 hwdesc = &d->hwdesc[0];
1973
1974 /* Allocate memory for DMA ring descriptor */
1975 if (uc->use_dma_pool) {
1976 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1977 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1978 GFP_NOWAIT,
1979 &hwdesc->cppi5_desc_paddr);
1980 } else {
1981 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1982 tr_count);
1983 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1984 uc->ud->desc_align);
1985 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1986 hwdesc->cppi5_desc_size,
1987 &hwdesc->cppi5_desc_paddr,
1988 GFP_NOWAIT);
1989 }
1990
1991 if (!hwdesc->cppi5_desc_vaddr) {
1992 kfree(d);
1993 return NULL;
1994 }
1995
1996 /* Start of the TR req records */
1997 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
1998 /* Start address of the TR response array */
1999 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2000
2001 tr_desc = hwdesc->cppi5_desc_vaddr;
2002
2003 if (uc->cyclic)
2004 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2005
2006 if (dir == DMA_DEV_TO_MEM)
2007 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2008 else
2009 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2010
2011 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2012 cppi5_desc_set_pktids(tr_desc, uc->id,
2013 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2014 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2015
2016 return d;
2017}
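/*
 * Resulting descriptor memory layout (sketch; offsets assume tr_size = 16
 * and tr_count = 2, other sizes scale the same way):
 *
 *	offset  0: CPPI5 TR descriptor header (occupies one tr_size slot)
 *	offset 16: TR request record 0   <- hwdesc->tr_req_base
 *	offset 32: TR request record 1
 *	offset 48: TR response records   <- hwdesc->tr_resp_base
 *	           (one struct cppi5_tr_resp_t per TR)
 *
 * i.e. the response array starts at tr_size * (tr_count + 1) from the
 * start of the descriptor.
 */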
2018
Peter Ujfalusia9793402020-02-14 11:14:38 +02002019/**
2020 * udma_get_tr_counters - calculate TR counters for a given length
2021 * @len: Length of the transfer
2022 * @align_to: Preferred alignment
2023 * @tr0_cnt0: First TR icnt0
2024 * @tr0_cnt1: First TR icnt1
2025 * @tr1_cnt0: Second (if used) TR icnt0
2026 *
2027 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2028 * For len >= SZ_64K two TRs are used in a simple way:
2029 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2030 * Second TR: the remaining length (tr1_cnt0)
2031 *
2032 * Returns the number of TRs the length needs (1 or 2)
2033 * -EINVAL if the length can not be supported
2034 */
2035static int udma_get_tr_counters(size_t len, unsigned long align_to,
2036 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2037{
2038 if (len < SZ_64K) {
2039 *tr0_cnt0 = len;
2040 *tr0_cnt1 = 1;
2041
2042 return 1;
2043 }
2044
2045 if (align_to > 3)
2046 align_to = 3;
2047
2048realign:
2049 *tr0_cnt0 = SZ_64K - BIT(align_to);
2050 if (len / *tr0_cnt0 >= SZ_64K) {
2051 if (align_to) {
2052 align_to--;
2053 goto realign;
2054 }
2055 return -EINVAL;
2056 }
2057
2058 *tr0_cnt1 = len / *tr0_cnt0;
2059 *tr1_cnt0 = len % *tr0_cnt0;
2060
2061 return 2;
2062}
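/*
 * Worked examples for udma_get_tr_counters() (numbers are illustrative):
 *
 *	len = 4096, any alignment:
 *		tr0_cnt0 = 4096, tr0_cnt1 = 1                 -> 1 TR
 *
 *	len = 200000, 8-byte aligned buffer (align_to clamped to 3):
 *		tr0_cnt0 = SZ_64K - BIT(3) = 65528
 *		tr0_cnt1 = 200000 / 65528 = 3    (first TR:  196584 bytes)
 *		tr1_cnt0 = 200000 % 65528 = 3416 (second TR:   3416 bytes)
 *		                                              -> 2 TRs
 */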
2063
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002064static struct udma_desc *
2065udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2066 unsigned int sglen, enum dma_transfer_direction dir,
2067 unsigned long tx_flags, void *context)
2068{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002069 struct scatterlist *sgent;
2070 struct udma_desc *d;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002071 struct cppi5_tr_type1_t *tr_req = NULL;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002072 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002073 unsigned int i;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002074 size_t tr_size;
2075 int num_tr = 0;
2076 int tr_idx = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002077
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002078 if (!is_slave_direction(dir)) {
2079 dev_err(uc->ud->dev, "Only slave transfers are supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002080 return NULL;
2081 }
2082
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002083 /* estimate the number of TRs we will need */
2084 for_each_sg(sgl, sgent, sglen, i) {
2085 if (sg_dma_len(sgent) < SZ_64K)
2086 num_tr++;
2087 else
2088 num_tr += 2;
2089 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002090
2091 /* Now allocate and setup the descriptor. */
2092 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002093 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002094 if (!d)
2095 return NULL;
2096
2097 d->sglen = sglen;
2098
2099 tr_req = d->hwdesc[0].tr_req_base;
2100 for_each_sg(sgl, sgent, sglen, i) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002101 dma_addr_t sg_addr = sg_dma_address(sgent);
2102
2103 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2104 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2105 if (num_tr < 0) {
2106 dev_err(uc->ud->dev, "size %u is not supported\n",
2107 sg_dma_len(sgent));
2108 udma_free_hwdesc(uc, d);
2109 kfree(d);
2110 return NULL;
2111 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002112
2113 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false,
2114 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2115 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2116
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002117 tr_req[tr_idx].addr = sg_addr;
2118 tr_req[tr_idx].icnt0 = tr0_cnt0;
2119 tr_req[tr_idx].icnt1 = tr0_cnt1;
2120 tr_req[tr_idx].dim1 = tr0_cnt0;
2121 tr_idx++;
2122
2123 if (num_tr == 2) {
2124 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2125 false, false,
2126 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2127 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2128 CPPI5_TR_CSF_SUPR_EVT);
2129
2130 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2131 tr_req[tr_idx].icnt0 = tr1_cnt0;
2132 tr_req[tr_idx].icnt1 = 1;
2133 tr_req[tr_idx].dim1 = tr1_cnt0;
2134 tr_idx++;
2135 }
2136
2137 d->residue += sg_dma_len(sgent);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002138 }
2139
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002140 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002141
2142 return d;
2143}
2144
2145static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2146 enum dma_slave_buswidth dev_width,
2147 u16 elcnt)
2148{
2149 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2150 return 0;
2151
2152 /* Bus width translates to the element size (ES) */
2153 switch (dev_width) {
2154 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2155 d->static_tr.elsize = 0;
2156 break;
2157 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2158 d->static_tr.elsize = 1;
2159 break;
2160 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2161 d->static_tr.elsize = 2;
2162 break;
2163 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2164 d->static_tr.elsize = 3;
2165 break;
2166 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2167 d->static_tr.elsize = 4;
2168 break;
2169 default: /* not reached */
2170 return -EINVAL;
2171 }
2172
2173 d->static_tr.elcnt = elcnt;
2174
2175 /*
2176 * PDMA must close the packet when the channel is in packet mode.
2177 * For TR mode when the channel is not cyclic we also need PDMA to close
2178 * the packet, otherwise the transfer will stall because PDMA holds on to
2179 * the data it has received from the peripheral.
2180 */
2181 if (uc->config.pkt_mode || !uc->cyclic) {
2182 unsigned int div = dev_width * elcnt;
2183
2184 if (uc->cyclic)
2185 d->static_tr.bstcnt = d->residue / d->sglen / div;
2186 else
2187 d->static_tr.bstcnt = d->residue / div;
2188
2189 if (uc->config.dir == DMA_DEV_TO_MEM &&
2190 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2191 return -EINVAL;
2192 } else {
2193 d->static_tr.bstcnt = 0;
2194 }
2195
2196 return 0;
2197}
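/*
 * Example static TR setup for a PDMA endpoint (values are illustrative):
 * DEV_TO_MEM cyclic transfer, 4-byte bus width (elsize = 3), bursts of 8
 * elements (elcnt = 8), 8192 byte buffer split into 4 periods of 2048 bytes.
 * div = 4 * 8 = 32, so bstcnt = 8192 / 4 / 32 = 64 bursts per period, well
 * below the 4095 limit imposed by the AM654 statictr_z_mask.
 */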
2198
2199static struct udma_desc *
2200udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2201 unsigned int sglen, enum dma_transfer_direction dir,
2202 unsigned long tx_flags, void *context)
2203{
2204 struct scatterlist *sgent;
2205 struct cppi5_host_desc_t *h_desc = NULL;
2206 struct udma_desc *d;
2207 u32 ring_id;
2208 unsigned int i;
2209
2210 d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2211 if (!d)
2212 return NULL;
2213
2214 d->sglen = sglen;
2215 d->hwdesc_count = sglen;
2216
2217 if (dir == DMA_DEV_TO_MEM)
2218 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2219 else
2220 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2221
2222 for_each_sg(sgl, sgent, sglen, i) {
2223 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2224 dma_addr_t sg_addr = sg_dma_address(sgent);
2225 struct cppi5_host_desc_t *desc;
2226 size_t sg_len = sg_dma_len(sgent);
2227
2228 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2229 GFP_NOWAIT,
2230 &hwdesc->cppi5_desc_paddr);
2231 if (!hwdesc->cppi5_desc_vaddr) {
2232 dev_err(uc->ud->dev,
2233 "descriptor%d allocation failed\n", i);
2234
2235 udma_free_hwdesc(uc, d);
2236 kfree(d);
2237 return NULL;
2238 }
2239
2240 d->residue += sg_len;
2241 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2242 desc = hwdesc->cppi5_desc_vaddr;
2243
2244 if (i == 0) {
2245 cppi5_hdesc_init(desc, 0, 0);
2246 /* Flow and Packet ID */
2247 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2248 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2249 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2250 } else {
2251 cppi5_hdesc_reset_hbdesc(desc);
2252 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2253 }
2254
2255 /* attach the sg buffer to the descriptor */
2256 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2257
2258 /* Attach link as host buffer descriptor */
2259 if (h_desc)
2260 cppi5_hdesc_link_hbdesc(h_desc,
2261 hwdesc->cppi5_desc_paddr);
2262
2263 if (dir == DMA_MEM_TO_DEV)
2264 h_desc = desc;
2265 }
2266
2267 if (d->residue >= SZ_4M) {
2268 dev_err(uc->ud->dev,
2269 "%s: Transfer size %u is over the supported 4M range\n",
2270 __func__, d->residue);
2271 udma_free_hwdesc(uc, d);
2272 kfree(d);
2273 return NULL;
2274 }
2275
2276 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2277 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2278
2279 return d;
2280}
2281
2282static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2283 void *data, size_t len)
2284{
2285 struct udma_desc *d = to_udma_desc(desc);
2286 struct udma_chan *uc = to_udma_chan(desc->chan);
2287 struct cppi5_host_desc_t *h_desc;
2288 u32 psd_size = len;
2289 u32 flags = 0;
2290
2291 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2292 return -ENOTSUPP;
2293
2294 if (!data || len > uc->config.metadata_size)
2295 return -EINVAL;
2296
2297 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2298 return -EINVAL;
2299
2300 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2301 if (d->dir == DMA_MEM_TO_DEV)
2302 memcpy(h_desc->epib, data, len);
2303
2304 if (uc->config.needs_epib)
2305 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2306
2307 d->metadata = data;
2308 d->metadata_size = len;
2309 if (uc->config.needs_epib)
2310 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2311
2312 cppi5_hdesc_update_flags(h_desc, flags);
2313 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2314
2315 return 0;
2316}
2317
2318static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2319 size_t *payload_len, size_t *max_len)
2320{
2321 struct udma_desc *d = to_udma_desc(desc);
2322 struct udma_chan *uc = to_udma_chan(desc->chan);
2323 struct cppi5_host_desc_t *h_desc;
2324
2325 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2326 return ERR_PTR(-ENOTSUPP);
2327
2328 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2329
2330 *max_len = uc->config.metadata_size;
2331
2332 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2333 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2334 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2335
2336 return h_desc->epib;
2337}
2338
2339static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2340 size_t payload_len)
2341{
2342 struct udma_desc *d = to_udma_desc(desc);
2343 struct udma_chan *uc = to_udma_chan(desc->chan);
2344 struct cppi5_host_desc_t *h_desc;
2345 u32 psd_size = payload_len;
2346 u32 flags = 0;
2347
2348 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2349 return -ENOTSUPP;
2350
2351 if (payload_len > uc->config.metadata_size)
2352 return -EINVAL;
2353
2354 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2355 return -EINVAL;
2356
2357 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2358
2359 if (uc->config.needs_epib) {
2360 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2361 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2362 }
2363
2364 cppi5_hdesc_update_flags(h_desc, flags);
2365 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2366
2367 return 0;
2368}
2369
2370static struct dma_descriptor_metadata_ops metadata_ops = {
2371 .attach = udma_attach_metadata,
2372 .get_ptr = udma_get_metadata_ptr,
2373 .set_len = udma_set_metadata_len,
2374};
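/*
 * Client-side sketch of DESC_METADATA_CLIENT usage (the buffer and sizes
 * are assumptions, not defined by this driver; md covers the EPIB plus a
 * few bytes of PS data):
 *
 *	struct dma_async_tx_descriptor *desc;
 *	u8 md[CPPI5_INFO0_HDESC_EPIB_SIZE + 4];
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV, 0);
 *	dmaengine_desc_attach_metadata(desc, md, sizeof(md));
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * In DESC_METADATA_ENGINE mode the client would instead use
 * dmaengine_desc_get_metadata_ptr()/dmaengine_desc_set_metadata_len() to
 * work directly on the CPPI5 EPIB/PS words of the descriptor.
 */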
2375
2376static struct dma_async_tx_descriptor *
2377udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2378 unsigned int sglen, enum dma_transfer_direction dir,
2379 unsigned long tx_flags, void *context)
2380{
2381 struct udma_chan *uc = to_udma_chan(chan);
2382 enum dma_slave_buswidth dev_width;
2383 struct udma_desc *d;
2384 u32 burst;
2385
2386 if (dir != uc->config.dir) {
2387 dev_err(chan->device->dev,
2388 "%s: chan%d is for %s, not supporting %s\n",
2389 __func__, uc->id,
2390 dmaengine_get_direction_text(uc->config.dir),
2391 dmaengine_get_direction_text(dir));
2392 return NULL;
2393 }
2394
2395 if (dir == DMA_DEV_TO_MEM) {
2396 dev_width = uc->cfg.src_addr_width;
2397 burst = uc->cfg.src_maxburst;
2398 } else if (dir == DMA_MEM_TO_DEV) {
2399 dev_width = uc->cfg.dst_addr_width;
2400 burst = uc->cfg.dst_maxburst;
2401 } else {
2402 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2403 return NULL;
2404 }
2405
2406 if (!burst)
2407 burst = 1;
2408
2409 if (uc->config.pkt_mode)
2410 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2411 context);
2412 else
2413 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2414 context);
2415
2416 if (!d)
2417 return NULL;
2418
2419 d->dir = dir;
2420 d->desc_idx = 0;
2421 d->tr_idx = 0;
2422
2423 /* static TR for remote PDMA */
2424 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2425 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002426 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002427 __func__, d->static_tr.bstcnt);
2428
2429 udma_free_hwdesc(uc, d);
2430 kfree(d);
2431 return NULL;
2432 }
2433
2434 if (uc->config.metadata_size)
2435 d->vd.tx.metadata_ops = &metadata_ops;
2436
2437 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2438}
2439
2440static struct udma_desc *
2441udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2442 size_t buf_len, size_t period_len,
2443 enum dma_transfer_direction dir, unsigned long flags)
2444{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002445 struct udma_desc *d;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002446 size_t tr_size, period_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002447 struct cppi5_tr_type1_t *tr_req;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002448 unsigned int periods = buf_len / period_len;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002449 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2450 unsigned int i;
2451 int num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002452
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002453 if (!is_slave_direction(dir)) {
2454 dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002455 return NULL;
2456 }
2457
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002458 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2459 &tr0_cnt1, &tr1_cnt0);
2460 if (num_tr < 0) {
2461 dev_err(uc->ud->dev, "size %zu is not supported\n",
2462 period_len);
2463 return NULL;
2464 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002465
2466 /* Now allocate and setup the descriptor. */
2467 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002468 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002469 if (!d)
2470 return NULL;
2471
2472 tr_req = d->hwdesc[0].tr_req_base;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002473 period_addr = buf_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002474 for (i = 0; i < periods; i++) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002475 int tr_idx = i * num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002476
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002477 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2478 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2479
2480 tr_req[tr_idx].addr = period_addr;
2481 tr_req[tr_idx].icnt0 = tr0_cnt0;
2482 tr_req[tr_idx].icnt1 = tr0_cnt1;
2483 tr_req[tr_idx].dim1 = tr0_cnt0;
2484
2485 if (num_tr == 2) {
2486 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2487 CPPI5_TR_CSF_SUPR_EVT);
2488 tr_idx++;
2489
2490 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2491 false, false,
2492 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2493
2494 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2495 tr_req[tr_idx].icnt0 = tr1_cnt0;
2496 tr_req[tr_idx].icnt1 = 1;
2497 tr_req[tr_idx].dim1 = tr1_cnt0;
2498 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002499
2500 if (!(flags & DMA_PREP_INTERRUPT))
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002501 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002502 CPPI5_TR_CSF_SUPR_EVT);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002503
2504 period_addr += period_len;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002505 }
2506
2507 return d;
2508}
2509
2510static struct udma_desc *
2511udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2512 size_t buf_len, size_t period_len,
2513 enum dma_transfer_direction dir, unsigned long flags)
2514{
2515 struct udma_desc *d;
2516 u32 ring_id;
2517 int i;
2518 int periods = buf_len / period_len;
2519
2520 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2521 return NULL;
2522
2523 if (period_len >= SZ_4M)
2524 return NULL;
2525
2526 d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2527 if (!d)
2528 return NULL;
2529
2530 d->hwdesc_count = periods;
2531
2532 /* TODO: re-check this... */
2533 if (dir == DMA_DEV_TO_MEM)
2534 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2535 else
2536 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2537
2538 for (i = 0; i < periods; i++) {
2539 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2540 dma_addr_t period_addr = buf_addr + (period_len * i);
2541 struct cppi5_host_desc_t *h_desc;
2542
2543 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2544 GFP_NOWAIT,
2545 &hwdesc->cppi5_desc_paddr);
2546 if (!hwdesc->cppi5_desc_vaddr) {
2547 dev_err(uc->ud->dev,
2548 "descriptor%d allocation failed\n", i);
2549
2550 udma_free_hwdesc(uc, d);
2551 kfree(d);
2552 return NULL;
2553 }
2554
2555 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2556 h_desc = hwdesc->cppi5_desc_vaddr;
2557
2558 cppi5_hdesc_init(h_desc, 0, 0);
2559 cppi5_hdesc_set_pktlen(h_desc, period_len);
2560
2561 /* Flow and Packet ID */
2562 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2563 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2564 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2565
2566 /* attach each period to a new descriptor */
2567 cppi5_hdesc_attach_buf(h_desc,
2568 period_addr, period_len,
2569 period_addr, period_len);
2570 }
2571
2572 return d;
2573}
2574
2575static struct dma_async_tx_descriptor *
2576udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2577 size_t period_len, enum dma_transfer_direction dir,
2578 unsigned long flags)
2579{
2580 struct udma_chan *uc = to_udma_chan(chan);
2581 enum dma_slave_buswidth dev_width;
2582 struct udma_desc *d;
2583 u32 burst;
2584
2585 if (dir != uc->config.dir) {
2586 dev_err(chan->device->dev,
2587 "%s: chan%d is for %s, not supporting %s\n",
2588 __func__, uc->id,
2589 dmaengine_get_direction_text(uc->config.dir),
2590 dmaengine_get_direction_text(dir));
2591 return NULL;
2592 }
2593
2594 uc->cyclic = true;
2595
2596 if (dir == DMA_DEV_TO_MEM) {
2597 dev_width = uc->cfg.src_addr_width;
2598 burst = uc->cfg.src_maxburst;
2599 } else if (dir == DMA_MEM_TO_DEV) {
2600 dev_width = uc->cfg.dst_addr_width;
2601 burst = uc->cfg.dst_maxburst;
2602 } else {
2603 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2604 return NULL;
2605 }
2606
2607 if (!burst)
2608 burst = 1;
2609
2610 if (uc->config.pkt_mode)
2611 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2612 dir, flags);
2613 else
2614 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2615 dir, flags);
2616
2617 if (!d)
2618 return NULL;
2619
2620 d->sglen = buf_len / period_len;
2621
2622 d->dir = dir;
2623 d->residue = buf_len;
2624
2625 /* static TR for remote PDMA */
2626 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2627 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002628 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002629 __func__, d->static_tr.bstcnt);
2630
2631 udma_free_hwdesc(uc, d);
2632 kfree(d);
2633 return NULL;
2634 }
2635
2636 if (uc->config.metadata_size)
2637 d->vd.tx.metadata_ops = &metadata_ops;
2638
2639 return vchan_tx_prep(&uc->vc, &d->vd, flags);
2640}
2641
2642static struct dma_async_tx_descriptor *
2643udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2644 size_t len, unsigned long tx_flags)
2645{
2646 struct udma_chan *uc = to_udma_chan(chan);
2647 struct udma_desc *d;
2648 struct cppi5_tr_type15_t *tr_req;
2649 int num_tr;
2650 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2651 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2652
2653 if (uc->config.dir != DMA_MEM_TO_MEM) {
2654 dev_err(chan->device->dev,
2655 "%s: chan%d is for %s, not supporting %s\n",
2656 __func__, uc->id,
2657 dmaengine_get_direction_text(uc->config.dir),
2658 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2659 return NULL;
2660 }
2661
Peter Ujfalusia9793402020-02-14 11:14:38 +02002662 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2663 &tr0_cnt1, &tr1_cnt0);
2664 if (num_tr < 0) {
2665 dev_err(uc->ud->dev, "size %zu is not supported\n",
2666 len);
2667 return NULL;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002668 }
2669
2670 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2671 if (!d)
2672 return NULL;
2673
2674 d->dir = DMA_MEM_TO_MEM;
2675 d->desc_idx = 0;
2676 d->tr_idx = 0;
2677 d->residue = len;
2678
2679 tr_req = d->hwdesc[0].tr_req_base;
2680
2681 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2682 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2683 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2684
2685 tr_req[0].addr = src;
2686 tr_req[0].icnt0 = tr0_cnt0;
2687 tr_req[0].icnt1 = tr0_cnt1;
2688 tr_req[0].icnt2 = 1;
2689 tr_req[0].icnt3 = 1;
2690 tr_req[0].dim1 = tr0_cnt0;
2691
2692 tr_req[0].daddr = dest;
2693 tr_req[0].dicnt0 = tr0_cnt0;
2694 tr_req[0].dicnt1 = tr0_cnt1;
2695 tr_req[0].dicnt2 = 1;
2696 tr_req[0].dicnt3 = 1;
2697 tr_req[0].ddim1 = tr0_cnt0;
2698
2699 if (num_tr == 2) {
2700 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2701 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2702 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2703
2704 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2705 tr_req[1].icnt0 = tr1_cnt0;
2706 tr_req[1].icnt1 = 1;
2707 tr_req[1].icnt2 = 1;
2708 tr_req[1].icnt3 = 1;
2709
2710 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2711 tr_req[1].dicnt0 = tr1_cnt0;
2712 tr_req[1].dicnt1 = 1;
2713 tr_req[1].dicnt2 = 1;
2714 tr_req[1].dicnt3 = 1;
2715 }
2716
2717 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
2718
2719 if (uc->config.metadata_size)
2720 d->vd.tx.metadata_ops = &metadata_ops;
2721
2722 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2723}
2724
2725static void udma_issue_pending(struct dma_chan *chan)
2726{
2727 struct udma_chan *uc = to_udma_chan(chan);
2728 unsigned long flags;
2729
2730 spin_lock_irqsave(&uc->vc.lock, flags);
2731
2732 /* If we have something pending and no active descriptor, then */
2733 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2734 /*
2735 * start a descriptor if the channel is NOT [marked as
2736 * terminating _and_ it is still running (teardown has not
2737 * completed yet)].
2738 */
2739 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2740 udma_is_chan_running(uc)))
2741 udma_start(uc);
2742 }
2743
2744 spin_unlock_irqrestore(&uc->vc.lock, flags);
2745}
2746
2747static enum dma_status udma_tx_status(struct dma_chan *chan,
2748 dma_cookie_t cookie,
2749 struct dma_tx_state *txstate)
2750{
2751 struct udma_chan *uc = to_udma_chan(chan);
2752 enum dma_status ret;
2753 unsigned long flags;
2754
2755 spin_lock_irqsave(&uc->vc.lock, flags);
2756
2757 ret = dma_cookie_status(chan, cookie, txstate);
2758
Peter Ujfalusi83903182020-02-14 11:14:41 +02002759 if (!udma_is_chan_running(uc))
2760 ret = DMA_COMPLETE;
2761
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002762 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2763 ret = DMA_PAUSED;
2764
2765 if (ret == DMA_COMPLETE || !txstate)
2766 goto out;
2767
2768 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2769 u32 peer_bcnt = 0;
2770 u32 bcnt = 0;
2771 u32 residue = uc->desc->residue;
2772 u32 delay = 0;
2773
2774 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2775 bcnt = udma_tchanrt_read(uc->tchan,
2776 UDMA_TCHAN_RT_SBCNT_REG);
2777
2778 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2779 peer_bcnt = udma_tchanrt_read(uc->tchan,
2780 UDMA_TCHAN_RT_PEER_BCNT_REG);
2781
2782 if (bcnt > peer_bcnt)
2783 delay = bcnt - peer_bcnt;
2784 }
2785 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2786 bcnt = udma_rchanrt_read(uc->rchan,
2787 UDMA_RCHAN_RT_BCNT_REG);
2788
2789 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2790 peer_bcnt = udma_rchanrt_read(uc->rchan,
2791 UDMA_RCHAN_RT_PEER_BCNT_REG);
2792
2793 if (peer_bcnt > bcnt)
2794 delay = peer_bcnt - bcnt;
2795 }
2796 } else {
2797 bcnt = udma_tchanrt_read(uc->tchan,
2798 UDMA_TCHAN_RT_BCNT_REG);
2799 }
2800
2801 bcnt -= uc->bcnt;
2802 if (bcnt && !(bcnt % uc->desc->residue))
2803 residue = 0;
2804 else
2805 residue -= bcnt % uc->desc->residue;
2806
2807 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2808 ret = DMA_COMPLETE;
2809 delay = 0;
2810 }
2811
2812 dma_set_residue(txstate, residue);
2813 dma_set_in_flight_bytes(txstate, delay);
2814
2815 } else {
2816 ret = DMA_COMPLETE;
2817 }
2818
2819out:
2820 spin_unlock_irqrestore(&uc->vc.lock, flags);
2821 return ret;
2822}
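/*
 * Residue example (illustrative numbers): for a MEM_TO_DEV descriptor with
 * residue = 4096, if bcnt (the SBCNT reading minus the bytes already
 * accounted in uc->bcnt) is 5120, then bcnt % residue = 1024 and the
 * reported residue is 4096 - 1024 = 3072. The modulo keeps the math correct
 * for cyclic transfers, where the hardware counters keep accumulating
 * across periods. "delay" is the gap between the UDMA and peer (PDMA) byte
 * counters, i.e. data accepted by UDMA but not yet pushed out to the
 * peripheral, reported via dma_set_in_flight_bytes().
 */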
2823
2824static int udma_pause(struct dma_chan *chan)
2825{
2826 struct udma_chan *uc = to_udma_chan(chan);
2827
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002828 /* pause the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002829 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002830 case DMA_DEV_TO_MEM:
2831 udma_rchanrt_update_bits(uc->rchan,
2832 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2833 UDMA_PEER_RT_EN_PAUSE,
2834 UDMA_PEER_RT_EN_PAUSE);
2835 break;
2836 case DMA_MEM_TO_DEV:
2837 udma_tchanrt_update_bits(uc->tchan,
2838 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2839 UDMA_PEER_RT_EN_PAUSE,
2840 UDMA_PEER_RT_EN_PAUSE);
2841 break;
2842 case DMA_MEM_TO_MEM:
2843 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2844 UDMA_CHAN_RT_CTL_PAUSE,
2845 UDMA_CHAN_RT_CTL_PAUSE);
2846 break;
2847 default:
2848 return -EINVAL;
2849 }
2850
2851 return 0;
2852}
2853
2854static int udma_resume(struct dma_chan *chan)
2855{
2856 struct udma_chan *uc = to_udma_chan(chan);
2857
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002858 /* resume the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002859 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002860 case DMA_DEV_TO_MEM:
2861 udma_rchanrt_update_bits(uc->rchan,
2862 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2863 UDMA_PEER_RT_EN_PAUSE, 0);
2864
2865 break;
2866 case DMA_MEM_TO_DEV:
2867 udma_tchanrt_update_bits(uc->tchan,
2868 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2869 UDMA_PEER_RT_EN_PAUSE, 0);
2870 break;
2871 case DMA_MEM_TO_MEM:
2872 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2873 UDMA_CHAN_RT_CTL_PAUSE, 0);
2874 break;
2875 default:
2876 return -EINVAL;
2877 }
2878
2879 return 0;
2880}
2881
2882static int udma_terminate_all(struct dma_chan *chan)
2883{
2884 struct udma_chan *uc = to_udma_chan(chan);
2885 unsigned long flags;
2886 LIST_HEAD(head);
2887
2888 spin_lock_irqsave(&uc->vc.lock, flags);
2889
2890 if (udma_is_chan_running(uc))
2891 udma_stop(uc);
2892
2893 if (uc->desc) {
2894 uc->terminated_desc = uc->desc;
2895 uc->desc = NULL;
2896 uc->terminated_desc->terminated = true;
2897 cancel_delayed_work(&uc->tx_drain.work);
2898 }
2899
2900 uc->paused = false;
2901
2902 vchan_get_all_descriptors(&uc->vc, &head);
2903 spin_unlock_irqrestore(&uc->vc.lock, flags);
2904 vchan_dma_desc_free_list(&uc->vc, &head);
2905
2906 return 0;
2907}
2908
2909static void udma_synchronize(struct dma_chan *chan)
2910{
2911 struct udma_chan *uc = to_udma_chan(chan);
2912 unsigned long timeout = msecs_to_jiffies(1000);
2913
2914 vchan_synchronize(&uc->vc);
2915
2916 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2917 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2918 timeout);
2919 if (!timeout) {
2920 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2921 uc->id);
2922 udma_dump_chan_stdata(uc);
2923 udma_reset_chan(uc, true);
2924 }
2925 }
2926
2927 udma_reset_chan(uc, false);
2928 if (udma_is_chan_running(uc))
2929 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2930
2931 cancel_delayed_work_sync(&uc->tx_drain.work);
2932 udma_reset_rings(uc);
2933}
2934
2935static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2936 struct virt_dma_desc *vd,
2937 struct dmaengine_result *result)
2938{
2939 struct udma_chan *uc = to_udma_chan(&vc->chan);
2940 struct udma_desc *d;
2941
2942 if (!vd)
2943 return;
2944
2945 d = to_udma_desc(&vd->tx);
2946
2947 if (d->metadata_size)
2948 udma_fetch_epib(uc, d);
2949
2950 /* Provide residue information for the client */
2951 if (result) {
2952 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2953
2954 if (cppi5_desc_get_type(desc_vaddr) ==
2955 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2956 result->residue = d->residue -
2957 cppi5_hdesc_get_pktlen(desc_vaddr);
2958 if (result->residue)
2959 result->result = DMA_TRANS_ABORTED;
2960 else
2961 result->result = DMA_TRANS_NOERROR;
2962 } else {
2963 result->residue = 0;
2964 result->result = DMA_TRANS_NOERROR;
2965 }
2966 }
2967}
2968
2969/*
2970 * This tasklet handles the completion of a DMA descriptor by
2971 * calling its callback and freeing it.
2972 */
2973static void udma_vchan_complete(unsigned long arg)
2974{
2975 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2976 struct virt_dma_desc *vd, *_vd;
2977 struct dmaengine_desc_callback cb;
2978 LIST_HEAD(head);
2979
2980 spin_lock_irq(&vc->lock);
2981 list_splice_tail_init(&vc->desc_completed, &head);
2982 vd = vc->cyclic;
2983 if (vd) {
2984 vc->cyclic = NULL;
2985 dmaengine_desc_get_callback(&vd->tx, &cb);
2986 } else {
2987 memset(&cb, 0, sizeof(cb));
2988 }
2989 spin_unlock_irq(&vc->lock);
2990
2991 udma_desc_pre_callback(vc, vd, NULL);
2992 dmaengine_desc_callback_invoke(&cb, NULL);
2993
2994 list_for_each_entry_safe(vd, _vd, &head, node) {
2995 struct dmaengine_result result;
2996
2997 dmaengine_desc_get_callback(&vd->tx, &cb);
2998
2999 list_del(&vd->node);
3000
3001 udma_desc_pre_callback(vc, vd, &result);
3002 dmaengine_desc_callback_invoke(&cb, &result);
3003
3004 vchan_vdesc_fini(vd);
3005 }
3006}
3007
3008static void udma_free_chan_resources(struct dma_chan *chan)
3009{
3010 struct udma_chan *uc = to_udma_chan(chan);
3011 struct udma_dev *ud = to_udma_dev(chan->device);
3012
3013 udma_terminate_all(chan);
3014 if (uc->terminated_desc) {
3015 udma_reset_chan(uc, false);
3016 udma_reset_rings(uc);
3017 }
3018
3019 cancel_delayed_work_sync(&uc->tx_drain.work);
3020 destroy_delayed_work_on_stack(&uc->tx_drain.work);
3021
3022 if (uc->irq_num_ring > 0) {
3023 free_irq(uc->irq_num_ring, uc);
3024
3025 uc->irq_num_ring = 0;
3026 }
3027 if (uc->irq_num_udma > 0) {
3028 free_irq(uc->irq_num_udma, uc);
3029
3030 uc->irq_num_udma = 0;
3031 }
3032
3033 /* Release PSI-L pairing */
3034 if (uc->psil_paired) {
3035 navss_psil_unpair(ud, uc->config.src_thread,
3036 uc->config.dst_thread);
3037 uc->psil_paired = false;
3038 }
3039
3040 vchan_free_chan_resources(&uc->vc);
3041 tasklet_kill(&uc->vc.task);
3042
3043 udma_free_tx_resources(uc);
3044 udma_free_rx_resources(uc);
3045 udma_reset_uchan(uc);
3046
3047 if (uc->use_dma_pool) {
3048 dma_pool_destroy(uc->hdesc_pool);
3049 uc->use_dma_pool = false;
3050 }
3051}
3052
3053static struct platform_driver udma_driver;
3054
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003055struct udma_filter_param {
3056 int remote_thread_id;
3057 u32 atype;
3058};
3059
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003060static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
3061{
3062 struct udma_chan_config *ucc;
3063 struct psil_endpoint_config *ep_config;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003064 struct udma_filter_param *filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003065 struct udma_chan *uc;
3066 struct udma_dev *ud;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003067
3068 if (chan->device->dev->driver != &udma_driver.driver)
3069 return false;
3070
3071 uc = to_udma_chan(chan);
3072 ucc = &uc->config;
3073 ud = uc->ud;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003074 filter_param = param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003075
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003076 if (filter_param->atype > 2) {
3077 dev_err(ud->dev, "Invalid channel atype: %u\n",
3078 filter_param->atype);
3079 return false;
3080 }
3081
3082 ucc->remote_thread_id = filter_param->remote_thread_id;
3083 ucc->atype = filter_param->atype;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003084
3085 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3086 ucc->dir = DMA_MEM_TO_DEV;
3087 else
3088 ucc->dir = DMA_DEV_TO_MEM;
3089
3090 ep_config = psil_get_ep_config(ucc->remote_thread_id);
3091 if (IS_ERR(ep_config)) {
3092 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3093 ucc->remote_thread_id);
3094 ucc->dir = DMA_MEM_TO_MEM;
3095 ucc->remote_thread_id = -1;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003096 ucc->atype = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003097 return false;
3098 }
3099
3100 ucc->pkt_mode = ep_config->pkt_mode;
3101 ucc->channel_tpl = ep_config->channel_tpl;
3102 ucc->notdpkt = ep_config->notdpkt;
3103 ucc->ep_type = ep_config->ep_type;
3104
3105 if (ucc->ep_type != PSIL_EP_NATIVE) {
3106 const struct udma_match_data *match_data = ud->match_data;
3107
3108 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3109 ucc->enable_acc32 = ep_config->pdma_acc32;
3110 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3111 ucc->enable_burst = ep_config->pdma_burst;
3112 }
3113
3114 ucc->needs_epib = ep_config->needs_epib;
3115 ucc->psd_size = ep_config->psd_size;
3116 ucc->metadata_size =
3117 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3118 ucc->psd_size;
3119
3120 if (ucc->pkt_mode)
3121 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3122 ucc->metadata_size, ud->desc_align);
3123
3124 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3125 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3126
3127 return true;
3128}
3129
3130static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3131 struct of_dma *ofdma)
3132{
3133 struct udma_dev *ud = ofdma->of_dma_data;
3134 dma_cap_mask_t mask = ud->ddev.cap_mask;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003135 struct udma_filter_param filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003136 struct dma_chan *chan;
3137
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003138 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003139 return NULL;
3140
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003141 filter_param.remote_thread_id = dma_spec->args[0];
3142 if (dma_spec->args_count == 2)
3143 filter_param.atype = dma_spec->args[1];
3144 else
3145 filter_param.atype = 0;
3146
3147 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3148 ofdma->of_node);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003149 if (!chan) {
3150 dev_err(ud->dev, "failed to get channel in %s\n", __func__);
3151 return ERR_PTR(-EINVAL);
3152 }
3153
3154 return chan;
3155}
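/*
 * Device tree usage sketch for the xlate above (the thread IDs are
 * hypothetical, real values come from the SoC's PSI-L thread map):
 *
 *	client: peripheral@0 {
 *		dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * The first specifier cell is the remote PSI-L thread ID (destination
 * threads have bit 15 set and map to DMA_MEM_TO_DEV); an optional second
 * cell selects the ATYPE (0..2).
 */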
3156
3157static struct udma_match_data am654_main_data = {
3158 .psil_base = 0x1000,
3159 .enable_memcpy_support = true,
3160 .statictr_z_mask = GENMASK(11, 0),
3161 .rchan_oes_offset = 0x2000,
3162 .tpl_levels = 2,
3163 .level_start_idx = {
3164 [0] = 8, /* Normal channels */
3165 [1] = 0, /* High Throughput channels */
3166 },
3167};
3168
3169static struct udma_match_data am654_mcu_data = {
3170 .psil_base = 0x6000,
Peter Ujfalusia4e68852020-03-27 16:42:28 +02003171 .enable_memcpy_support = false,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003172 .statictr_z_mask = GENMASK(11, 0),
3173 .rchan_oes_offset = 0x2000,
3174 .tpl_levels = 2,
3175 .level_start_idx = {
3176 [0] = 2, /* Normal channels */
3177 [1] = 0, /* High Throughput channels */
3178 },
3179};
3180
3181static struct udma_match_data j721e_main_data = {
3182 .psil_base = 0x1000,
3183 .enable_memcpy_support = true,
3184 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3185 .statictr_z_mask = GENMASK(23, 0),
3186 .rchan_oes_offset = 0x400,
3187 .tpl_levels = 3,
3188 .level_start_idx = {
3189 [0] = 16, /* Normal channels */
3190 [1] = 4, /* High Throughput channels */
3191 [2] = 0, /* Ultra High Throughput channels */
3192 },
3193};
3194
3195static struct udma_match_data j721e_mcu_data = {
3196 .psil_base = 0x6000,
3197 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3198 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3199 .statictr_z_mask = GENMASK(23, 0),
3200 .rchan_oes_offset = 0x400,
3201 .tpl_levels = 2,
3202 .level_start_idx = {
3203 [0] = 2, /* Normal channels */
3204 [1] = 0, /* High Throughput channels */
3205 },
3206};
3207
3208static const struct of_device_id udma_of_match[] = {
3209 {
3210 .compatible = "ti,am654-navss-main-udmap",
3211 .data = &am654_main_data,
3212 },
3213 {
3214 .compatible = "ti,am654-navss-mcu-udmap",
3215 .data = &am654_mcu_data,
3216 }, {
3217 .compatible = "ti,j721e-navss-main-udmap",
3218 .data = &j721e_main_data,
3219 }, {
3220 .compatible = "ti,j721e-navss-mcu-udmap",
3221 .data = &j721e_mcu_data,
3222 },
3223 { /* Sentinel */ },
3224};
3225
3226static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3227{
3228 struct resource *res;
3229 int i;
3230
3231 for (i = 0; i < MMR_LAST; i++) {
3232 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3233 mmr_names[i]);
3234 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3235 if (IS_ERR(ud->mmrs[i]))
3236 return PTR_ERR(ud->mmrs[i]);
3237 }
3238
3239 return 0;
3240}
3241
3242static int udma_setup_resources(struct udma_dev *ud)
3243{
3244 struct device *dev = ud->dev;
3245 int ch_count, ret, i, j;
3246 u32 cap2, cap3;
3247 struct ti_sci_resource_desc *rm_desc;
3248 struct ti_sci_resource *rm_res, irq_res;
3249 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3250 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3251 "ti,sci-rm-range-rchan",
3252 "ti,sci-rm-range-rflow" };
3253
3254 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
3255 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
3256
3257 ud->rflow_cnt = cap3 & 0x3fff;
3258 ud->tchan_cnt = cap2 & 0x1ff;
3259 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
3260 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
3261 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3262
3263 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3264 sizeof(unsigned long), GFP_KERNEL);
3265 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3266 GFP_KERNEL);
3267 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3268 sizeof(unsigned long), GFP_KERNEL);
3269 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3270 GFP_KERNEL);
3271 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3272 sizeof(unsigned long),
3273 GFP_KERNEL);
3274 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3275 BITS_TO_LONGS(ud->rflow_cnt),
3276 sizeof(unsigned long),
3277 GFP_KERNEL);
3278 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3279 sizeof(unsigned long),
3280 GFP_KERNEL);
3281 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3282 GFP_KERNEL);
3283
3284 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3285 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3286 !ud->rflows || !ud->rflow_in_use)
3287 return -ENOMEM;
3288
3289 /*
3290 * RX flows with the same Ids as RX channels are reserved to be used
3291 * as default flows if remote HW can't generate flow_ids. Those
3292 * RX flows can be requested only explicitly by id.
3293 */
3294 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3295
3296 /* by default no GP rflows are assigned to Linux */
3297 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3298
3299 /* Get resource ranges from tisci */
3300 for (i = 0; i < RM_RANGE_LAST; i++)
3301 tisci_rm->rm_ranges[i] =
3302 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3303 tisci_rm->tisci_dev_id,
3304 (char *)range_names[i]);
3305
3306 /* tchan ranges */
3307 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3308 if (IS_ERR(rm_res)) {
3309 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3310 } else {
3311 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3312 for (i = 0; i < rm_res->sets; i++) {
3313 rm_desc = &rm_res->desc[i];
3314 bitmap_clear(ud->tchan_map, rm_desc->start,
3315 rm_desc->num);
3316 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3317 rm_desc->start, rm_desc->num);
3318 }
3319 }
3320 irq_res.sets = rm_res->sets;
3321
3322 /* rchan and matching default flow ranges */
3323 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3324 if (IS_ERR(rm_res)) {
3325 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3326 } else {
3327 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3328 for (i = 0; i < rm_res->sets; i++) {
3329 rm_desc = &rm_res->desc[i];
3330 bitmap_clear(ud->rchan_map, rm_desc->start,
3331 rm_desc->num);
3332 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3333 rm_desc->start, rm_desc->num);
3334 }
3335 }
3336
3337 irq_res.sets += rm_res->sets;
3338 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3339 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3340 for (i = 0; i < rm_res->sets; i++) {
3341 irq_res.desc[i].start = rm_res->desc[i].start;
3342 irq_res.desc[i].num = rm_res->desc[i].num;
3343 }
3344 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3345 for (j = 0; j < rm_res->sets; j++, i++) {
3346 irq_res.desc[i].start = rm_res->desc[j].start +
3347 ud->match_data->rchan_oes_offset;
3348 irq_res.desc[i].num = rm_res->desc[j].num;
3349 }
3350 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3351 kfree(irq_res.desc);
3352 if (ret) {
3353 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3354 return ret;
3355 }
3356
3357 /* GP rflow ranges */
3358 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3359 if (IS_ERR(rm_res)) {
3360 /* all gp flows are assigned exclusively to Linux */
3361 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3362 ud->rflow_cnt - ud->rchan_cnt);
3363 } else {
3364 for (i = 0; i < rm_res->sets; i++) {
3365 rm_desc = &rm_res->desc[i];
3366 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3367 rm_desc->num);
3368 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3369 rm_desc->start, rm_desc->num);
3370 }
3371 }
3372
3373 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3374 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3375 if (!ch_count)
3376 return -ENODEV;
3377
3378 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3379 GFP_KERNEL);
3380 if (!ud->channels)
3381 return -ENOMEM;
3382
3383 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3384 ch_count,
3385 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3386 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3387 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3388 ud->rflow_cnt));
3389
3390 return ch_count;
3391}
3392
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003393static int udma_setup_rx_flush(struct udma_dev *ud)
3394{
3395 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3396 struct cppi5_desc_hdr_t *tr_desc;
3397 struct cppi5_tr_type1_t *tr_req;
3398 struct cppi5_host_desc_t *desc;
3399 struct device *dev = ud->dev;
3400 struct udma_hwdesc *hwdesc;
3401 size_t tr_size;
3402
3403 /* Allocate 1K buffer for discarded data on RX channel teardown */
3404 rx_flush->buffer_size = SZ_1K;
3405 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3406 GFP_KERNEL);
3407 if (!rx_flush->buffer_vaddr)
3408 return -ENOMEM;
3409
3410 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3411 rx_flush->buffer_size,
3412 DMA_TO_DEVICE);
3413 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3414 return -ENOMEM;
3415
3416 /* Set up descriptor to be used for TR mode */
3417 hwdesc = &rx_flush->hwdescs[0];
3418 tr_size = sizeof(struct cppi5_tr_type1_t);
3419 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3420 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3421 ud->desc_align);
3422
3423 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3424 GFP_KERNEL);
3425 if (!hwdesc->cppi5_desc_vaddr)
3426 return -ENOMEM;
3427
3428 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3429 hwdesc->cppi5_desc_size,
3430 DMA_TO_DEVICE);
3431 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3432 return -ENOMEM;
3433
3434 /* Start of the TR req records */
3435 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3436 /* Start address of the TR response array */
3437 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3438
3439 tr_desc = hwdesc->cppi5_desc_vaddr;
3440 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3441 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3442 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3443
3444 tr_req = hwdesc->tr_req_base;
3445 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3446 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3447 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3448
3449 tr_req->addr = rx_flush->buffer_paddr;
3450 tr_req->icnt0 = rx_flush->buffer_size;
3451 tr_req->icnt1 = 1;
3452
Peter Ujfalusi5bbeea32020-05-12 16:45:44 +03003453 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3454 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3455
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003456 /* Set up descriptor to be used for packet mode */
3457 hwdesc = &rx_flush->hwdescs[1];
3458 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3459 CPPI5_INFO0_HDESC_EPIB_SIZE +
3460 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3461 ud->desc_align);
3462
3463 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3464 GFP_KERNEL);
3465 if (!hwdesc->cppi5_desc_vaddr)
3466 return -ENOMEM;
3467
3468 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3469 hwdesc->cppi5_desc_size,
3470 DMA_TO_DEVICE);
3471 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3472 return -ENOMEM;
3473
3474 desc = hwdesc->cppi5_desc_vaddr;
3475 cppi5_hdesc_init(desc, 0, 0);
3476 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3477 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3478
3479 cppi5_hdesc_attach_buf(desc,
3480 rx_flush->buffer_paddr, rx_flush->buffer_size,
3481 rx_flush->buffer_paddr, rx_flush->buffer_size);
3482
3483 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3484 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3485 return 0;
3486}
3487
Peter Ujfalusidb8d9b42020-03-06 16:28:38 +02003488#ifdef CONFIG_DEBUG_FS
3489static void udma_dbg_summary_show_chan(struct seq_file *s,
3490 struct dma_chan *chan)
3491{
3492 struct udma_chan *uc = to_udma_chan(chan);
3493 struct udma_chan_config *ucc = &uc->config;
3494
3495 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
3496 chan->dbg_client_name ?: "in-use");
3497 seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
3498
3499 switch (uc->config.dir) {
3500 case DMA_MEM_TO_MEM:
3501 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
3502 ucc->src_thread, ucc->dst_thread);
3503 break;
3504 case DMA_DEV_TO_MEM:
3505 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
3506 ucc->src_thread, ucc->dst_thread);
3507 break;
3508 case DMA_MEM_TO_DEV:
3509 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
3510 ucc->src_thread, ucc->dst_thread);
3511 break;
3512 default:
3513 seq_printf(s, ")\n");
3514 return;
3515 }
3516
3517 if (ucc->ep_type == PSIL_EP_NATIVE) {
3518 seq_printf(s, "PSI-L Native");
3519 if (ucc->metadata_size) {
3520 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
3521 if (ucc->psd_size)
3522 seq_printf(s, " PSDsize:%u", ucc->psd_size);
3523 seq_printf(s, " ]");
3524 }
3525 } else {
3526 seq_printf(s, "PDMA");
3527 if (ucc->enable_acc32 || ucc->enable_burst)
3528 seq_printf(s, "[%s%s ]",
3529 ucc->enable_acc32 ? " ACC32" : "",
3530 ucc->enable_burst ? " BURST" : "");
3531 }
3532
3533 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
3534}
3535
3536static void udma_dbg_summary_show(struct seq_file *s,
3537 struct dma_device *dma_dev)
3538{
3539 struct dma_chan *chan;
3540
3541 list_for_each_entry(chan, &dma_dev->channels, device_node) {
3542 if (chan->client_count)
3543 udma_dbg_summary_show_chan(s, chan);
3544 }
3545}
3546#endif /* CONFIG_DEBUG_FS */
3547
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003548#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3549 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3550 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3551 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3552 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3553
3554static int udma_probe(struct platform_device *pdev)
3555{
3556 struct device_node *navss_node = pdev->dev.parent->of_node;
3557 struct device *dev = &pdev->dev;
3558 struct udma_dev *ud;
3559 const struct of_device_id *match;
3560 int i, ret;
3561 int ch_count;
3562
3563 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3564 if (ret)
3565 dev_err(dev, "failed to set dma mask\n");
3566
3567 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3568 if (!ud)
3569 return -ENOMEM;
3570
3571 ret = udma_get_mmrs(pdev, ud);
3572 if (ret)
3573 return ret;
3574
3575 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3576 if (IS_ERR(ud->tisci_rm.tisci))
3577 return PTR_ERR(ud->tisci_rm.tisci);
3578
3579 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3580 &ud->tisci_rm.tisci_dev_id);
3581 if (ret) {
3582 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3583 return ret;
3584 }
3585 pdev->id = ud->tisci_rm.tisci_dev_id;
3586
3587 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3588 &ud->tisci_rm.tisci_navss_dev_id);
3589 if (ret) {
3590 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3591 return ret;
3592 }
3593
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003594 ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
3595 if (!ret && ud->atype > 2) {
3596 dev_err(dev, "Invalid atype: %u\n", ud->atype);
3597 return -EINVAL;
3598 }
3599
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003600 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3601 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3602
3603 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3604 if (IS_ERR(ud->ringacc))
3605 return PTR_ERR(ud->ringacc);
3606
3607 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3608 DOMAIN_BUS_TI_SCI_INTA_MSI);
3609 if (!dev->msi_domain) {
3610 dev_err(dev, "Failed to get MSI domain\n");
3611 return -EPROBE_DEFER;
3612 }
3613
3614 match = of_match_node(udma_of_match, dev->of_node);
3615 if (!match) {
3616 dev_err(dev, "No compatible match found\n");
3617 return -ENODEV;
3618 }
3619 ud->match_data = match->data;
3620
3621 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3622 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3623
3624 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3625 ud->ddev.device_config = udma_slave_config;
3626 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3627 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3628 ud->ddev.device_issue_pending = udma_issue_pending;
3629 ud->ddev.device_tx_status = udma_tx_status;
3630 ud->ddev.device_pause = udma_pause;
3631 ud->ddev.device_resume = udma_resume;
3632 ud->ddev.device_terminate_all = udma_terminate_all;
3633 ud->ddev.device_synchronize = udma_synchronize;
Peter Ujfalusidb8d9b42020-03-06 16:28:38 +02003634#ifdef CONFIG_DEBUG_FS
3635 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
3636#endif
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003637
3638 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3639 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3640 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3641 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3642 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3643 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3644 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3645 DESC_METADATA_ENGINE;
3646 if (ud->match_data->enable_memcpy_support) {
3647 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3648 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3649 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3650 }
3651
3652 ud->ddev.dev = dev;
3653 ud->dev = dev;
3654 ud->psil_base = ud->match_data->psil_base;
3655
3656 INIT_LIST_HEAD(&ud->ddev.channels);
3657 INIT_LIST_HEAD(&ud->desc_to_purge);
3658
3659 ch_count = udma_setup_resources(ud);
3660 if (ch_count <= 0)
3661 return ch_count;
3662
3663 spin_lock_init(&ud->lock);
3664 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3665
3666 ud->desc_align = 64;
3667 if (ud->desc_align < dma_get_cache_alignment())
3668 ud->desc_align = dma_get_cache_alignment();
3669
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003670 ret = udma_setup_rx_flush(ud);
3671 if (ret)
3672 return ret;
3673
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003674 for (i = 0; i < ud->tchan_cnt; i++) {
3675 struct udma_tchan *tchan = &ud->tchans[i];
3676
3677 tchan->id = i;
3678 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3679 }
3680
3681 for (i = 0; i < ud->rchan_cnt; i++) {
3682 struct udma_rchan *rchan = &ud->rchans[i];
3683
3684 rchan->id = i;
3685 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3686 }
3687
3688 for (i = 0; i < ud->rflow_cnt; i++) {
3689 struct udma_rflow *rflow = &ud->rflows[i];
3690
3691 rflow->id = i;
3692 }
3693
3694 for (i = 0; i < ch_count; i++) {
3695 struct udma_chan *uc = &ud->channels[i];
3696
3697 uc->ud = ud;
3698 uc->vc.desc_free = udma_desc_free;
3699 uc->id = i;
3700 uc->tchan = NULL;
3701 uc->rchan = NULL;
3702 uc->config.remote_thread_id = -1;
3703 uc->config.dir = DMA_MEM_TO_MEM;
3704 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3705 dev_name(dev), i);
3706
3707 vchan_init(&uc->vc, &ud->ddev);
3708 /* Use custom vchan completion handling */
3709 tasklet_init(&uc->vc.task, udma_vchan_complete,
3710 (unsigned long)&uc->vc);
3711 init_completion(&uc->teardown_completed);
3712 }
3713
3714 ret = dma_async_device_register(&ud->ddev);
3715 if (ret) {
3716 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3717 return ret;
3718 }
3719
3720 platform_set_drvdata(pdev, ud);
3721
3722 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3723 if (ret) {
3724 dev_err(dev, "failed to register of_dma controller\n");
3725 dma_async_device_unregister(&ud->ddev);
3726 }
3727
3728 return ret;
3729}
3730
3731static struct platform_driver udma_driver = {
3732 .driver = {
3733 .name = "ti-udma",
3734 .of_match_table = udma_of_match,
3735 .suppress_bind_attrs = true,
3736 },
3737 .probe = udma_probe,
3738};
3739builtin_platform_driver(udma_driver);
Grygorii Strashkod7024192019-12-23 13:04:51 +02003740
3741/* Private interfaces to UDMA */
3742#include "k3-udma-private.c"