1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/delay.h>
9#include <linux/dmaengine.h>
10#include <linux/dma-mapping.h>
11#include <linux/dmapool.h>
12#include <linux/err.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/list.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_device.h>
22#include <linux/of_irq.h>
23#include <linux/workqueue.h>
24#include <linux/completion.h>
25#include <linux/soc/ti/k3-ringacc.h>
26#include <linux/soc/ti/ti_sci_protocol.h>
27#include <linux/soc/ti/ti_sci_inta_msi.h>
28#include <linux/dma/ti-cppi5.h>
29
30#include "../virt-dma.h"
31#include "k3-udma.h"
32#include "k3-psil-priv.h"
33
34struct udma_static_tr {
35 u8 elsize; /* RPSTR0 */
36 u16 elcnt; /* RPSTR0 */
37 u16 bstcnt; /* RPSTR1 */
38};
39
40#define K3_UDMA_MAX_RFLOWS 1024
41#define K3_UDMA_DEFAULT_RING_SIZE 16
42
43/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44#define UDMA_RFLOW_SRCTAG_NONE 0
45#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
46#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
47#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
48
49#define UDMA_RFLOW_DSTTAG_NONE 0
50#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
51#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
52#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
53#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
54
55struct udma_chan;
56
57enum udma_mmr {
58 MMR_GCFG = 0,
59 MMR_RCHANRT,
60 MMR_TCHANRT,
61 MMR_LAST,
62};
63
64static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
65
66struct udma_tchan {
67 void __iomem *reg_rt;
68
69 int id;
70 struct k3_ring *t_ring; /* Transmit ring */
71 struct k3_ring *tc_ring; /* Transmit Completion ring */
72};
73
74struct udma_rflow {
75 int id;
76 struct k3_ring *fd_ring; /* Free Descriptor ring */
77 struct k3_ring *r_ring; /* Receive ring */
78};
79
80struct udma_rchan {
81 void __iomem *reg_rt;
82
83 int id;
84};
85
86#define UDMA_FLAG_PDMA_ACC32 BIT(0)
87#define UDMA_FLAG_PDMA_BURST BIT(1)
88
89struct udma_match_data {
90 u32 psil_base;
91 bool enable_memcpy_support;
92 u32 flags;
93 u32 statictr_z_mask;
94 u32 rchan_oes_offset;
95
96 u8 tpl_levels;
97 u32 level_start_idx[];
98};
99
100struct udma_hwdesc {
101 size_t cppi5_desc_size;
102 void *cppi5_desc_vaddr;
103 dma_addr_t cppi5_desc_paddr;
104
105 /* TR descriptor internal pointers */
106 void *tr_req_base;
107 struct cppi5_tr_resp_t *tr_resp_base;
108};
109
110struct udma_rx_flush {
111 struct udma_hwdesc hwdescs[2];
112
113 size_t buffer_size;
114 void *buffer_vaddr;
115 dma_addr_t buffer_paddr;
116};
117
118struct udma_dev {
119 struct dma_device ddev;
120 struct device *dev;
121 void __iomem *mmrs[MMR_LAST];
122 const struct udma_match_data *match_data;
123
124 size_t desc_align; /* alignment to use for descriptors */
125
126 struct udma_tisci_rm tisci_rm;
127
128 struct k3_ringacc *ringacc;
129
130 struct work_struct purge_work;
131 struct list_head desc_to_purge;
132 spinlock_t lock;
133
134 struct udma_rx_flush rx_flush;
135
136 int tchan_cnt;
137 int echan_cnt;
138 int rchan_cnt;
139 int rflow_cnt;
140 unsigned long *tchan_map;
141 unsigned long *rchan_map;
142 unsigned long *rflow_gp_map;
143 unsigned long *rflow_gp_map_allocated;
144 unsigned long *rflow_in_use;
145
146 struct udma_tchan *tchans;
147 struct udma_rchan *rchans;
148 struct udma_rflow *rflows;
149
150 struct udma_chan *channels;
151 u32 psil_base;
152 u32 atype;
153};
154
155struct udma_desc {
156 struct virt_dma_desc vd;
157
158 bool terminated;
159
160 enum dma_transfer_direction dir;
161
162 struct udma_static_tr static_tr;
163 u32 residue;
164
165 unsigned int sglen;
166 unsigned int desc_idx; /* Only used for cyclic in packet mode */
167 unsigned int tr_idx;
168
169 u32 metadata_size;
170 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
171
172 unsigned int hwdesc_count;
173 struct udma_hwdesc hwdesc[0];
174};
175
176enum udma_chan_state {
177 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
178 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
179 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
180};
181
182struct udma_tx_drain {
183 struct delayed_work work;
184 ktime_t tstamp;
185 u32 residue;
186};
187
188struct udma_chan_config {
189 bool pkt_mode; /* TR or packet */
190 bool needs_epib; /* EPIB is needed for the communication or not */
191 u32 psd_size; /* size of Protocol Specific Data */
192 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
193 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
194 bool notdpkt; /* Suppress sending TDC packet */
195 int remote_thread_id;
196 u32 atype;
197 u32 src_thread;
198 u32 dst_thread;
199 enum psil_endpoint_type ep_type;
200 bool enable_acc32;
201 bool enable_burst;
202 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
203
204 enum dma_transfer_direction dir;
205};
206
207struct udma_chan {
208 struct virt_dma_chan vc;
209 struct dma_slave_config cfg;
210 struct udma_dev *ud;
211 struct udma_desc *desc;
212 struct udma_desc *terminated_desc;
213 struct udma_static_tr static_tr;
214 char *name;
215
216 struct udma_tchan *tchan;
217 struct udma_rchan *rchan;
218 struct udma_rflow *rflow;
219
220 bool psil_paired;
221
222 int irq_num_ring;
223 int irq_num_udma;
224
225 bool cyclic;
226 bool paused;
227
228 enum udma_chan_state state;
229 struct completion teardown_completed;
230
231 struct udma_tx_drain tx_drain;
232
233 u32 bcnt; /* number of bytes completed since the start of the channel */
234
235 /* Channel configuration parameters */
236 struct udma_chan_config config;
237
238 /* dmapool for packet mode descriptors */
239 bool use_dma_pool;
240 struct dma_pool *hdesc_pool;
241
242 u32 id;
243};
244
245static inline struct udma_dev *to_udma_dev(struct dma_device *d)
246{
247 return container_of(d, struct udma_dev, ddev);
248}
249
250static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
251{
252 return container_of(c, struct udma_chan, vc.chan);
253}
254
255static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
256{
257 return container_of(t, struct udma_desc, vd.tx);
258}
259
260/* Generic register access functions */
261static inline u32 udma_read(void __iomem *base, int reg)
262{
263 return readl(base + reg);
264}
265
266static inline void udma_write(void __iomem *base, int reg, u32 val)
267{
268 writel(val, base + reg);
269}
270
271static inline void udma_update_bits(void __iomem *base, int reg,
272 u32 mask, u32 val)
273{
274 u32 tmp, orig;
275
276 orig = readl(base + reg);
277 tmp = orig & ~mask;
278 tmp |= (val & mask);
279
280 if (tmp != orig)
281 writel(tmp, base + reg);
282}
283
284/* TCHANRT */
285static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
286{
287 if (!tchan)
288 return 0;
289 return udma_read(tchan->reg_rt, reg);
290}
291
292static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
293 u32 val)
294{
295 if (!tchan)
296 return;
297 udma_write(tchan->reg_rt, reg, val);
298}
299
300static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
301 u32 mask, u32 val)
302{
303 if (!tchan)
304 return;
305 udma_update_bits(tchan->reg_rt, reg, mask, val);
306}
307
308/* RCHANRT */
309static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
310{
311 if (!rchan)
312 return 0;
313 return udma_read(rchan->reg_rt, reg);
314}
315
316static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
317 u32 val)
318{
319 if (!rchan)
320 return;
321 udma_write(rchan->reg_rt, reg, val);
322}
323
324static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
325 u32 mask, u32 val)
326{
327 if (!rchan)
328 return;
329 udma_update_bits(rchan->reg_rt, reg, mask, val);
330}
331
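/*
 * PSI-L thread pairing via TI-SCI. The destination thread number gets
 * K3_PSIL_DST_THREAD_ID_OFFSET set before the request is sent to the
 * firmware.
 */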
332static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
333{
334 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
335
336 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
337 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
338 tisci_rm->tisci_navss_dev_id,
339 src_thread, dst_thread);
340}
341
342static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
343 u32 dst_thread)
344{
345 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
346
347 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
348 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
349 tisci_rm->tisci_navss_dev_id,
350 src_thread, dst_thread);
351}
352
353static void udma_reset_uchan(struct udma_chan *uc)
354{
355 memset(&uc->config, 0, sizeof(uc->config));
356 uc->config.remote_thread_id = -1;
357 uc->state = UDMA_CHAN_IS_IDLE;
358}
359
360static void udma_dump_chan_stdata(struct udma_chan *uc)
361{
362 struct device *dev = uc->ud->dev;
363 u32 offset;
364 int i;
365
366 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
367 dev_dbg(dev, "TCHAN State data:\n");
368 for (i = 0; i < 32; i++) {
369 offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
370 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
371 udma_tchanrt_read(uc->tchan, offset));
372 }
373 }
374
375 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
376 dev_dbg(dev, "RCHAN State data:\n");
377 for (i = 0; i < 32; i++) {
378 offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
379 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
380 udma_rchanrt_read(uc->rchan, offset));
381 }
382 }
383}
384
385static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
386 int idx)
387{
388 return d->hwdesc[idx].cppi5_desc_paddr;
389}
390
391static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
392{
393 return d->hwdesc[idx].cppi5_desc_vaddr;
394}
395
396static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
397 dma_addr_t paddr)
398{
399 struct udma_desc *d = uc->terminated_desc;
400
401 if (d) {
402 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
403 d->desc_idx);
404
405 if (desc_paddr != paddr)
406 d = NULL;
407 }
408
409 if (!d) {
410 d = uc->desc;
411 if (d) {
412 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
413 d->desc_idx);
414
415 if (desc_paddr != paddr)
416 d = NULL;
417 }
418 }
419
420 return d;
421}
422
423static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
424{
425 if (uc->use_dma_pool) {
426 int i;
427
428 for (i = 0; i < d->hwdesc_count; i++) {
429 if (!d->hwdesc[i].cppi5_desc_vaddr)
430 continue;
431
432 dma_pool_free(uc->hdesc_pool,
433 d->hwdesc[i].cppi5_desc_vaddr,
434 d->hwdesc[i].cppi5_desc_paddr);
435
436 d->hwdesc[i].cppi5_desc_vaddr = NULL;
437 }
438 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
439 struct udma_dev *ud = uc->ud;
440
441 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
442 d->hwdesc[0].cppi5_desc_vaddr,
443 d->hwdesc[0].cppi5_desc_paddr);
444
445 d->hwdesc[0].cppi5_desc_vaddr = NULL;
446 }
447}
448
449static void udma_purge_desc_work(struct work_struct *work)
450{
451 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
452 struct virt_dma_desc *vd, *_vd;
453 unsigned long flags;
454 LIST_HEAD(head);
455
456 spin_lock_irqsave(&ud->lock, flags);
457 list_splice_tail_init(&ud->desc_to_purge, &head);
458 spin_unlock_irqrestore(&ud->lock, flags);
459
460 list_for_each_entry_safe(vd, _vd, &head, node) {
461 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
462 struct udma_desc *d = to_udma_desc(&vd->tx);
463
464 udma_free_hwdesc(uc, d);
465 list_del(&vd->node);
466 kfree(d);
467 }
468
469 /* If more to purge, schedule the work again */
470 if (!list_empty(&ud->desc_to_purge))
471 schedule_work(&ud->purge_work);
472}
473
474static void udma_desc_free(struct virt_dma_desc *vd)
475{
476 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
477 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
478 struct udma_desc *d = to_udma_desc(&vd->tx);
479 unsigned long flags;
480
481 if (uc->terminated_desc == d)
482 uc->terminated_desc = NULL;
483
484 if (uc->use_dma_pool) {
485 udma_free_hwdesc(uc, d);
486 kfree(d);
487 return;
488 }
489
490 spin_lock_irqsave(&ud->lock, flags);
491 list_add_tail(&vd->node, &ud->desc_to_purge);
492 spin_unlock_irqrestore(&ud->lock, flags);
493
494 schedule_work(&ud->purge_work);
495}
496
497static bool udma_is_chan_running(struct udma_chan *uc)
498{
499 u32 trt_ctl = 0;
500 u32 rrt_ctl = 0;
501
502 if (uc->tchan)
503 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
504 if (uc->rchan)
505 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
506
507 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
508 return true;
509
510 return false;
511}
512
513static bool udma_is_chan_paused(struct udma_chan *uc)
514{
515 u32 val, pause_mask;
516
517 switch (uc->config.dir) {
518 case DMA_DEV_TO_MEM:
519 val = udma_rchanrt_read(uc->rchan,
520 UDMA_RCHAN_RT_PEER_RT_EN_REG);
521 pause_mask = UDMA_PEER_RT_EN_PAUSE;
522 break;
523 case DMA_MEM_TO_DEV:
524 val = udma_tchanrt_read(uc->tchan,
525 UDMA_TCHAN_RT_PEER_RT_EN_REG);
526 pause_mask = UDMA_PEER_RT_EN_PAUSE;
527 break;
528 case DMA_MEM_TO_MEM:
529 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
530 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
531 break;
532 default:
533 return false;
534 }
535
536 if (val & pause_mask)
537 return true;
538
539 return false;
540}
541
542static void udma_sync_for_device(struct udma_chan *uc, int idx)
543{
544 struct udma_desc *d = uc->desc;
545
546 if (uc->cyclic && uc->config.pkt_mode) {
547 dma_sync_single_for_device(uc->ud->dev,
548 d->hwdesc[idx].cppi5_desc_paddr,
549 d->hwdesc[idx].cppi5_desc_size,
550 DMA_TO_DEVICE);
551 } else {
552 int i;
553
554 for (i = 0; i < d->hwdesc_count; i++) {
555 if (!d->hwdesc[i].cppi5_desc_vaddr)
556 continue;
557
558 dma_sync_single_for_device(uc->ud->dev,
559 d->hwdesc[i].cppi5_desc_paddr,
560 d->hwdesc[i].cppi5_desc_size,
561 DMA_TO_DEVICE);
562 }
563 }
564}
565
566static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
567{
568 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
569}
570
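/*
 * Push one hardware descriptor of the current udma_desc to the hardware:
 * the free descriptor ring for DEV_TO_MEM, the transmit ring otherwise.
 * idx == -1 selects the dedicated RX flush descriptor used for DEV_TO_MEM
 * teardown.
 */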
571static int udma_push_to_ring(struct udma_chan *uc, int idx)
572{
573 struct udma_desc *d = uc->desc;
574 struct k3_ring *ring = NULL;
575 dma_addr_t paddr;
576
577 switch (uc->config.dir) {
578 case DMA_DEV_TO_MEM:
579 ring = uc->rflow->fd_ring;
580 break;
581 case DMA_MEM_TO_DEV:
582 case DMA_MEM_TO_MEM:
583 ring = uc->tchan->t_ring;
584 break;
585 default:
586 return -EINVAL;
587 }
588
589 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
590 if (idx == -1) {
591 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
592 } else {
593 paddr = udma_curr_cppi5_desc_paddr(d, idx);
594
595 wmb(); /* Ensure that writes are not moved over this point */
596 udma_sync_for_device(uc, idx);
597 }
598
599 return k3_ringacc_ring_push(ring, &paddr);
600}
601
602static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
603{
604 if (uc->config.dir != DMA_DEV_TO_MEM)
605 return false;
606
607 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
608 return true;
609
610 return false;
611}
612
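/*
 * Pop a completed descriptor address from the completion (or receive) ring.
 * Teardown completion markers are returned to the caller for handling, while
 * the RX flush descriptor is filtered out by returning -ENOENT.
 */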
613static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
614{
615 struct k3_ring *ring = NULL;
616 int ret = -ENOENT;
617
618 switch (uc->config.dir) {
619 case DMA_DEV_TO_MEM:
620 ring = uc->rflow->r_ring;
621 break;
622 case DMA_MEM_TO_DEV:
623 case DMA_MEM_TO_MEM:
624 ring = uc->tchan->tc_ring;
625 break;
626 default:
627 break;
628 }
629
630 if (ring && k3_ringacc_ring_get_occ(ring)) {
631 struct udma_desc *d = NULL;
632
633 ret = k3_ringacc_ring_pop(ring, addr);
634 if (ret)
635 return ret;
636
637 /* Teardown completion */
638 if (cppi5_desc_is_tdcm(*addr))
639 return ret;
640
641 /* Check for flush descriptor */
642 if (udma_desc_is_rx_flush(uc, *addr))
643 return -ENOENT;
644
645 d = udma_udma_desc_from_paddr(uc, *addr);
646
647 if (d)
648 dma_sync_single_for_cpu(uc->ud->dev, *addr,
649 d->hwdesc[0].cppi5_desc_size,
650 DMA_FROM_DEVICE);
651 rmb(); /* Ensure that reads are not moved before this point */
652 }
653
654 return ret;
655}
656
657static void udma_reset_rings(struct udma_chan *uc)
658{
659 struct k3_ring *ring1 = NULL;
660 struct k3_ring *ring2 = NULL;
661
662 switch (uc->config.dir) {
663 case DMA_DEV_TO_MEM:
664 if (uc->rchan) {
665 ring1 = uc->rflow->fd_ring;
666 ring2 = uc->rflow->r_ring;
667 }
668 break;
669 case DMA_MEM_TO_DEV:
670 case DMA_MEM_TO_MEM:
671 if (uc->tchan) {
672 ring1 = uc->tchan->t_ring;
673 ring2 = uc->tchan->tc_ring;
674 }
675 break;
676 default:
677 break;
678 }
679
680 if (ring1)
681 k3_ringacc_ring_reset_dma(ring1,
682 k3_ringacc_ring_get_occ(ring1));
683 if (ring2)
684 k3_ringacc_ring_reset(ring2);
685
686 /* make sure we are not leaking memory by a stalled descriptor */
687 if (uc->terminated_desc) {
688 udma_desc_free(&uc->terminated_desc->vd);
689 uc->terminated_desc = NULL;
690 }
691}
692
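/*
 * Reset the channel counters by writing back the current values of the
 * real-time BCNT/SBCNT/PCNT/PEER_BCNT registers, and clear the byte count
 * accumulated in software.
 */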
693static void udma_reset_counters(struct udma_chan *uc)
694{
695 u32 val;
696
697 if (uc->tchan) {
698 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
699 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
700
701 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
702 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
703
704 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
705 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
706
707 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
708 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
709 }
710
711 if (uc->rchan) {
712 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
713 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
714
715 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
716 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
717
718 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
719 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
720
721 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
722 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
723 }
724
725 uc->bcnt = 0;
726}
727
728static int udma_reset_chan(struct udma_chan *uc, bool hard)
729{
730 switch (uc->config.dir) {
731 case DMA_DEV_TO_MEM:
732 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
733 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
734 break;
735 case DMA_MEM_TO_DEV:
736 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
737 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
738 break;
739 case DMA_MEM_TO_MEM:
740 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
741 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
742 break;
743 default:
744 return -EINVAL;
745 }
746
747 /* Reset all counters */
748 udma_reset_counters(uc);
749
750 /* Hard reset: re-initialize the channel to reset */
751 if (hard) {
752 struct udma_chan_config ucc_backup;
753 int ret;
754
755 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
756 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
757
758 /* restore the channel configuration */
759 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
760 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
761 if (ret)
762 return ret;
763
764 /*
765 * Setting forced teardown after forced reset helps recovering
766 * the rchan.
767 */
768 if (uc->config.dir == DMA_DEV_TO_MEM)
769 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
770 UDMA_CHAN_RT_CTL_EN |
771 UDMA_CHAN_RT_CTL_TDOWN |
772 UDMA_CHAN_RT_CTL_FTDOWN);
773 }
774 uc->state = UDMA_CHAN_IS_IDLE;
775
776 return 0;
777}
778
779static void udma_start_desc(struct udma_chan *uc)
780{
781 struct udma_chan_config *ucc = &uc->config;
782
783 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
784 int i;
785
786 /* Push all descriptors to ring for packet mode cyclic or RX */
787 for (i = 0; i < uc->desc->sglen; i++)
788 udma_push_to_ring(uc, i);
789 } else {
790 udma_push_to_ring(uc, 0);
791 }
792}
793
794static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
795{
796 /* Only PDMAs have staticTR */
797 if (uc->config.ep_type == PSIL_EP_NATIVE)
798 return false;
799
800 /* Check if the staticTR configuration has changed for TX */
801 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
802 return true;
803
804 return false;
805}
806
807static int udma_start(struct udma_chan *uc)
808{
809 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
810
811 if (!vd) {
812 uc->desc = NULL;
813 return -ENOENT;
814 }
815
816 list_del(&vd->node);
817
818 uc->desc = to_udma_desc(&vd->tx);
819
820 /* Channel is already running and does not need reconfiguration */
821 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
822 udma_start_desc(uc);
823 goto out;
824 }
825
826 /* Make sure that we clear the teardown bit, if it is set */
827 udma_reset_chan(uc, false);
828
829 /* Push descriptors before we start the channel */
830 udma_start_desc(uc);
831
832 switch (uc->desc->dir) {
833 case DMA_DEV_TO_MEM:
834 /* Config remote TR */
835 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
836 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
837 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
838 const struct udma_match_data *match_data =
839 uc->ud->match_data;
840
841 if (uc->config.enable_acc32)
842 val |= PDMA_STATIC_TR_XY_ACC32;
843 if (uc->config.enable_burst)
844 val |= PDMA_STATIC_TR_XY_BURST;
845
846 udma_rchanrt_write(uc->rchan,
847 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
848
849 udma_rchanrt_write(uc->rchan,
850 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
851 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
852 match_data->statictr_z_mask));
853
854 /* save the current staticTR configuration */
855 memcpy(&uc->static_tr, &uc->desc->static_tr,
856 sizeof(uc->static_tr));
857 }
858
859 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
860 UDMA_CHAN_RT_CTL_EN);
861
862 /* Enable remote */
863 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
864 UDMA_PEER_RT_EN_ENABLE);
865
866 break;
867 case DMA_MEM_TO_DEV:
868 /* Config remote TR */
869 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
870 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
871 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
872
873 if (uc->config.enable_acc32)
874 val |= PDMA_STATIC_TR_XY_ACC32;
875 if (uc->config.enable_burst)
876 val |= PDMA_STATIC_TR_XY_BURST;
877
878 udma_tchanrt_write(uc->tchan,
879 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
880
881 /* save the current staticTR configuration */
882 memcpy(&uc->static_tr, &uc->desc->static_tr,
883 sizeof(uc->static_tr));
884 }
885
886 /* Enable remote */
887 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
888 UDMA_PEER_RT_EN_ENABLE);
889
890 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
891 UDMA_CHAN_RT_CTL_EN);
892
893 break;
894 case DMA_MEM_TO_MEM:
895 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
896 UDMA_CHAN_RT_CTL_EN);
897 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
898 UDMA_CHAN_RT_CTL_EN);
899
900 break;
901 default:
902 return -EINVAL;
903 }
904
905 uc->state = UDMA_CHAN_IS_ACTIVE;
906out:
907
908 return 0;
909}
910
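/*
 * Initiate channel teardown. For DEV_TO_MEM the RX flush descriptor is
 * pushed first (when the channel is not cyclic and has no active descriptor)
 * and teardown is requested from the remote peer; for MEM_TO_DEV and
 * MEM_TO_MEM teardown is requested on the channel itself. Completion is
 * signaled asynchronously via the teardown completion message.
 */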
911static int udma_stop(struct udma_chan *uc)
912{
913 enum udma_chan_state old_state = uc->state;
914
915 uc->state = UDMA_CHAN_IS_TERMINATING;
916 reinit_completion(&uc->teardown_completed);
917
918 switch (uc->config.dir) {
919 case DMA_DEV_TO_MEM:
920 if (!uc->cyclic && !uc->desc)
921 udma_push_to_ring(uc, -1);
922
923 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
924 UDMA_PEER_RT_EN_ENABLE |
925 UDMA_PEER_RT_EN_TEARDOWN);
926 break;
927 case DMA_MEM_TO_DEV:
928 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
929 UDMA_PEER_RT_EN_ENABLE |
930 UDMA_PEER_RT_EN_FLUSH);
931 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
932 UDMA_CHAN_RT_CTL_EN |
933 UDMA_CHAN_RT_CTL_TDOWN);
934 break;
935 case DMA_MEM_TO_MEM:
936 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
937 UDMA_CHAN_RT_CTL_EN |
938 UDMA_CHAN_RT_CTL_TDOWN);
939 break;
940 default:
941 uc->state = old_state;
942 complete_all(&uc->teardown_completed);
943 return -EINVAL;
944 }
945
946 return 0;
947}
948
949static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
950{
951 struct udma_desc *d = uc->desc;
952 struct cppi5_host_desc_t *h_desc;
953
954 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
955 cppi5_hdesc_reset_to_original(h_desc);
956 udma_push_to_ring(uc, d->desc_idx);
957 d->desc_idx = (d->desc_idx + 1) % d->sglen;
958}
959
960static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
961{
962 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
963
964 memcpy(d->metadata, h_desc->epib, d->metadata_size);
965}
966
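/*
 * For MEM_TO_DEV transfers towards a PDMA peer the ring completion only
 * means that UDMA has pushed the data out; compare the peer byte count with
 * the channel byte count to decide whether the peer has actually drained it.
 * If not, store the residue and a timestamp for the tx drain polling work.
 */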
967static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
968{
969 u32 peer_bcnt, bcnt;
970
971 /* Only TX towards PDMA is affected */
972 if (uc->config.ep_type == PSIL_EP_NATIVE ||
973 uc->config.dir != DMA_MEM_TO_DEV)
974 return true;
975
976 peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
977 bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
978
979 /* Transfer is incomplete, store current residue and time stamp */
980 if (peer_bcnt < bcnt) {
981 uc->tx_drain.residue = bcnt - peer_bcnt;
982 uc->tx_drain.tstamp = ktime_get();
983 return false;
984 }
985
986 return true;
987}
988
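/*
 * Delayed work polling the draining of a MEM_TO_DEV transfer. The next check
 * is scheduled based on the rate at which the peer consumed data since the
 * previous poll; when no progress is seen the check is retried after one
 * second. Once the descriptor is really done, the cookie is completed and
 * the next descriptor is started.
 */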
989static void udma_check_tx_completion(struct work_struct *work)
990{
991 struct udma_chan *uc = container_of(work, typeof(*uc),
992 tx_drain.work.work);
993 bool desc_done = true;
994 u32 residue_diff;
995 ktime_t time_diff;
996 unsigned long delay;
997
998 while (1) {
999 if (uc->desc) {
1000 /* Get previous residue and time stamp */
1001 residue_diff = uc->tx_drain.residue;
1002 time_diff = uc->tx_drain.tstamp;
1003 /*
1004 * Get current residue and time stamp or see if
1005 * transfer is complete
1006 */
1007 desc_done = udma_is_desc_really_done(uc, uc->desc);
1008 }
1009
1010 if (!desc_done) {
1011 /*
1012 * Find the time delta and residue delta w.r.t
1013 * previous poll
1014 */
1015 time_diff = ktime_sub(uc->tx_drain.tstamp,
1016 time_diff) + 1;
1017 residue_diff -= uc->tx_drain.residue;
1018 if (residue_diff) {
1019 /*
1020 * Try to guess when we should check
1021 * next time by calculating rate at
1022 * which data is being drained at the
1023 * peer device
1024 */
1025 delay = (time_diff / residue_diff) *
1026 uc->tx_drain.residue;
1027 } else {
1028 /* No progress, check again in 1 second */
1029 schedule_delayed_work(&uc->tx_drain.work, HZ);
1030 break;
1031 }
1032
1033 usleep_range(ktime_to_us(delay),
1034 ktime_to_us(delay) + 10);
1035 continue;
1036 }
1037
1038 if (uc->desc) {
1039 struct udma_desc *d = uc->desc;
1040
1041 uc->bcnt += d->residue;
1042 udma_start(uc);
1043 vchan_cookie_complete(&d->vd);
1044 break;
1045 }
1046
1047 break;
1048 }
1049}
1050
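/*
 * Completion ring interrupt: pop the returned descriptor and either handle
 * the teardown completion message, advance a cyclic transfer, complete the
 * cookie of a finished transfer, or kick the tx drain work if the peer has
 * not consumed all data yet.
 */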
1051static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1052{
1053 struct udma_chan *uc = data;
1054 struct udma_desc *d;
1055 unsigned long flags;
1056 dma_addr_t paddr = 0;
1057
1058 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1059 return IRQ_HANDLED;
1060
1061 spin_lock_irqsave(&uc->vc.lock, flags);
1062
1063 /* Teardown completion message */
1064 if (cppi5_desc_is_tdcm(paddr)) {
1065 complete_all(&uc->teardown_completed);
1066
1067 if (uc->terminated_desc) {
1068 udma_desc_free(&uc->terminated_desc->vd);
1069 uc->terminated_desc = NULL;
1070 }
1071
1072 if (!uc->desc)
1073 udma_start(uc);
1074
1075 goto out;
1076 }
1077
1078 d = udma_udma_desc_from_paddr(uc, paddr);
1079
1080 if (d) {
1081 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1082 d->desc_idx);
1083 if (desc_paddr != paddr) {
1084 dev_err(uc->ud->dev, "not matching descriptors!\n");
1085 goto out;
1086 }
1087
1088 if (d == uc->desc) {
1089 /* active descriptor */
1090 if (uc->cyclic) {
1091 udma_cyclic_packet_elapsed(uc);
1092 vchan_cyclic_callback(&d->vd);
1093 } else {
1094 if (udma_is_desc_really_done(uc, d)) {
1095 uc->bcnt += d->residue;
1096 udma_start(uc);
1097 vchan_cookie_complete(&d->vd);
1098 } else {
1099 schedule_delayed_work(&uc->tx_drain.work,
1100 0);
1101 }
1102 }
1103 } else {
1104 /*
1105 * terminated descriptor, mark the descriptor as
1106 * completed to update the channel's cookie marker
1107 */
1108 dma_cookie_complete(&d->vd.tx);
1109 }
1110 }
1111out:
1112 spin_unlock_irqrestore(&uc->vc.lock, flags);
1113
1114 return IRQ_HANDLED;
1115}
1116
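/*
 * UDMA (TR event) interrupt, used by slave TR mode channels: step the TR
 * index and either run the cyclic callback or complete the descriptor and
 * start the next one.
 */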
1117static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1118{
1119 struct udma_chan *uc = data;
1120 struct udma_desc *d;
1121 unsigned long flags;
1122
1123 spin_lock_irqsave(&uc->vc.lock, flags);
1124 d = uc->desc;
1125 if (d) {
1126 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1127
1128 if (uc->cyclic) {
1129 vchan_cyclic_callback(&d->vd);
1130 } else {
1131 /* TODO: figure out the real amount of data */
1132 uc->bcnt += d->residue;
1133 udma_start(uc);
1134 vchan_cookie_complete(&d->vd);
1135 }
1136 }
1137
1138 spin_unlock_irqrestore(&uc->vc.lock, flags);
1139
1140 return IRQ_HANDLED;
1141}
1142
1143/**
1144 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1145 * @ud: UDMA device
1146 * @from: Start the search from this flow id number
1147 * @cnt: Number of consecutive flow ids to allocate
1148 *
1149 * Allocate a range of RX flow ids for future use. These flows can be requested
1150 * only by explicit flow id number. If @from is set to -1 it will try to find the
1151 * first free range. If @from is a positive value it will force allocation only
1152 * of the specified range of flows.
1153 *
1154 * Returns -ENOMEM if a free range can't be found.
1155 * -EEXIST if the requested range is busy.
1156 * -EINVAL if wrong input values are passed.
1157 * Returns the flow id on success.
1158 */
1159static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1160{
1161 int start, tmp_from;
1162 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1163
1164 tmp_from = from;
1165 if (tmp_from < 0)
1166 tmp_from = ud->rchan_cnt;
1167 /* default flows can't be allocated; they are accessible only by id */
1168 if (tmp_from < ud->rchan_cnt)
1169 return -EINVAL;
1170
1171 if (tmp_from + cnt > ud->rflow_cnt)
1172 return -EINVAL;
1173
1174 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1175 ud->rflow_cnt);
1176
1177 start = bitmap_find_next_zero_area(tmp,
1178 ud->rflow_cnt,
1179 tmp_from, cnt, 0);
1180 if (start >= ud->rflow_cnt)
1181 return -ENOMEM;
1182
1183 if (from >= 0 && start != from)
1184 return -EEXIST;
1185
1186 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1187 return start;
1188}
1189
1190static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1191{
1192 if (from < ud->rchan_cnt)
1193 return -EINVAL;
1194 if (from + cnt > ud->rflow_cnt)
1195 return -EINVAL;
1196
1197 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1198 return 0;
1199}
1200
1201static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1202{
1203 /*
1204 * An attempt to request an rflow by ID can be made for any rflow that is
1205 * not in use, with the assumption that the caller knows what it is doing.
1206 * TI-SCI FW will perform an additional permission check anyway, so it is
1207 * safe.
1208 */
1209
1210 if (id < 0 || id >= ud->rflow_cnt)
1211 return ERR_PTR(-ENOENT);
1212
1213 if (test_bit(id, ud->rflow_in_use))
1214 return ERR_PTR(-ENOENT);
1215
1216 /* GP rflow has to be allocated first */
1217 if (!test_bit(id, ud->rflow_gp_map) &&
1218 !test_bit(id, ud->rflow_gp_map_allocated))
1219 return ERR_PTR(-EINVAL);
1220
1221 dev_dbg(ud->dev, "get rflow%d\n", id);
1222 set_bit(id, ud->rflow_in_use);
1223 return &ud->rflows[id];
1224}
1225
1226static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1227{
1228 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1229 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1230 return;
1231 }
1232
1233 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1234 clear_bit(rflow->id, ud->rflow_in_use);
1235}
1236
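/*
 * Generate __udma_reserve_tchan()/__udma_reserve_rchan(): reserve a specific
 * channel id if one was requested, otherwise take the first free channel
 * starting from the requested throughput level's start index.
 */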
1237#define UDMA_RESERVE_RESOURCE(res) \
1238static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1239 enum udma_tp_level tpl, \
1240 int id) \
1241{ \
1242 if (id >= 0) { \
1243 if (test_bit(id, ud->res##_map)) { \
1244 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1245 return ERR_PTR(-ENOENT); \
1246 } \
1247 } else { \
1248 int start; \
1249 \
1250 if (tpl >= ud->match_data->tpl_levels) \
1251 tpl = ud->match_data->tpl_levels - 1; \
1252 \
1253 start = ud->match_data->level_start_idx[tpl]; \
1254 \
1255 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1256 start); \
1257 if (id == ud->res##_cnt) { \
1258 return ERR_PTR(-ENOENT); \
1259 } \
1260 } \
1261 \
1262 set_bit(id, ud->res##_map); \
1263 return &ud->res##s[id]; \
1264}
1265
1266UDMA_RESERVE_RESOURCE(tchan);
1267UDMA_RESERVE_RESOURCE(rchan);
1268
1269static int udma_get_tchan(struct udma_chan *uc)
1270{
1271 struct udma_dev *ud = uc->ud;
1272
1273 if (uc->tchan) {
1274 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1275 uc->id, uc->tchan->id);
1276 return 0;
1277 }
1278
1279 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1280
1281 return PTR_ERR_OR_ZERO(uc->tchan);
1282}
1283
1284static int udma_get_rchan(struct udma_chan *uc)
1285{
1286 struct udma_dev *ud = uc->ud;
1287
1288 if (uc->rchan) {
1289 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1290 uc->id, uc->rchan->id);
1291 return 0;
1292 }
1293
1294 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1295
1296 return PTR_ERR_OR_ZERO(uc->rchan);
1297}
1298
1299static int udma_get_chan_pair(struct udma_chan *uc)
1300{
1301 struct udma_dev *ud = uc->ud;
1302 const struct udma_match_data *match_data = ud->match_data;
1303 int chan_id, end;
1304
1305 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1306 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1307 uc->id, uc->tchan->id);
1308 return 0;
1309 }
1310
1311 if (uc->tchan) {
1312 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1313 uc->id, uc->tchan->id);
1314 return -EBUSY;
1315 } else if (uc->rchan) {
1316 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1317 uc->id, uc->rchan->id);
1318 return -EBUSY;
1319 }
1320
1321 /* Can be optimized, but let's have it like this for now */
1322 end = min(ud->tchan_cnt, ud->rchan_cnt);
1323 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1324 chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
1325 for (; chan_id < end; chan_id++) {
1326 if (!test_bit(chan_id, ud->tchan_map) &&
1327 !test_bit(chan_id, ud->rchan_map))
1328 break;
1329 }
1330
1331 if (chan_id == end)
1332 return -ENOENT;
1333
1334 set_bit(chan_id, ud->tchan_map);
1335 set_bit(chan_id, ud->rchan_map);
1336 uc->tchan = &ud->tchans[chan_id];
1337 uc->rchan = &ud->rchans[chan_id];
1338
1339 return 0;
1340}
1341
1342static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1343{
1344 struct udma_dev *ud = uc->ud;
1345
1346 if (!uc->rchan) {
1347 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1348 return -EINVAL;
1349 }
1350
1351 if (uc->rflow) {
1352 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1353 uc->id, uc->rflow->id);
1354 return 0;
1355 }
1356
1357 uc->rflow = __udma_get_rflow(ud, flow_id);
1358
1359 return PTR_ERR_OR_ZERO(uc->rflow);
1360}
1361
1362static void udma_put_rchan(struct udma_chan *uc)
1363{
1364 struct udma_dev *ud = uc->ud;
1365
1366 if (uc->rchan) {
1367 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1368 uc->rchan->id);
1369 clear_bit(uc->rchan->id, ud->rchan_map);
1370 uc->rchan = NULL;
1371 }
1372}
1373
1374static void udma_put_tchan(struct udma_chan *uc)
1375{
1376 struct udma_dev *ud = uc->ud;
1377
1378 if (uc->tchan) {
1379 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1380 uc->tchan->id);
1381 clear_bit(uc->tchan->id, ud->tchan_map);
1382 uc->tchan = NULL;
1383 }
1384}
1385
1386static void udma_put_rflow(struct udma_chan *uc)
1387{
1388 struct udma_dev *ud = uc->ud;
1389
1390 if (uc->rflow) {
1391 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1392 uc->rflow->id);
1393 __udma_put_rflow(ud, uc->rflow);
1394 uc->rflow = NULL;
1395 }
1396}
1397
1398static void udma_free_tx_resources(struct udma_chan *uc)
1399{
1400 if (!uc->tchan)
1401 return;
1402
1403 k3_ringacc_ring_free(uc->tchan->t_ring);
1404 k3_ringacc_ring_free(uc->tchan->tc_ring);
1405 uc->tchan->t_ring = NULL;
1406 uc->tchan->tc_ring = NULL;
1407
1408 udma_put_tchan(uc);
1409}
1410
1411static int udma_alloc_tx_resources(struct udma_chan *uc)
1412{
1413 struct k3_ring_cfg ring_cfg;
1414 struct udma_dev *ud = uc->ud;
1415 int ret;
1416
1417 ret = udma_get_tchan(uc);
1418 if (ret)
1419 return ret;
1420
1421 uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
1422 uc->tchan->id, 0);
1423 if (!uc->tchan->t_ring) {
1424 ret = -EBUSY;
1425 goto err_tx_ring;
1426 }
1427
1428 uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1429 if (!uc->tchan->tc_ring) {
1430 ret = -EBUSY;
1431 goto err_txc_ring;
1432 }
1433
1434 memset(&ring_cfg, 0, sizeof(ring_cfg));
1435 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1436 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1437 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1438
1439 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1440 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1441
1442 if (ret)
1443 goto err_ringcfg;
1444
1445 return 0;
1446
1447err_ringcfg:
1448 k3_ringacc_ring_free(uc->tchan->tc_ring);
1449 uc->tchan->tc_ring = NULL;
1450err_txc_ring:
1451 k3_ringacc_ring_free(uc->tchan->t_ring);
1452 uc->tchan->t_ring = NULL;
1453err_tx_ring:
1454 udma_put_tchan(uc);
1455
1456 return ret;
1457}
1458
1459static void udma_free_rx_resources(struct udma_chan *uc)
1460{
1461 if (!uc->rchan)
1462 return;
1463
1464 if (uc->rflow) {
1465 struct udma_rflow *rflow = uc->rflow;
1466
1467 k3_ringacc_ring_free(rflow->fd_ring);
1468 k3_ringacc_ring_free(rflow->r_ring);
1469 rflow->fd_ring = NULL;
1470 rflow->r_ring = NULL;
1471
1472 udma_put_rflow(uc);
1473 }
1474
1475 udma_put_rchan(uc);
1476}
1477
1478static int udma_alloc_rx_resources(struct udma_chan *uc)
1479{
1480 struct udma_dev *ud = uc->ud;
1481 struct k3_ring_cfg ring_cfg;
1482 struct udma_rflow *rflow;
1483 int fd_ring_id;
1484 int ret;
1485
1486 ret = udma_get_rchan(uc);
1487 if (ret)
1488 return ret;
1489
1490 /* For MEM_TO_MEM we don't need rflow or rings */
1491 if (uc->config.dir == DMA_MEM_TO_MEM)
1492 return 0;
1493
1494 ret = udma_get_rflow(uc, uc->rchan->id);
1495 if (ret) {
1496 ret = -EBUSY;
1497 goto err_rflow;
1498 }
1499
1500 rflow = uc->rflow;
1501 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1502 rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
1503 if (!rflow->fd_ring) {
1504 ret = -EBUSY;
1505 goto err_rx_ring;
1506 }
1507
1508 rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1509 if (!rflow->r_ring) {
1510 ret = -EBUSY;
1511 goto err_rxc_ring;
1512 }
1513
1514 memset(&ring_cfg, 0, sizeof(ring_cfg));
1515
1516 if (uc->config.pkt_mode)
1517 ring_cfg.size = SG_MAX_SEGMENTS;
1518 else
1519 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1520
1521 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1522 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1523
1524 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1525 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1526 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1527
1528 if (ret)
1529 goto err_ringcfg;
1530
1531 return 0;
1532
1533err_ringcfg:
1534 k3_ringacc_ring_free(rflow->r_ring);
1535 rflow->r_ring = NULL;
1536err_rxc_ring:
1537 k3_ringacc_ring_free(rflow->fd_ring);
1538 rflow->fd_ring = NULL;
1539err_rx_ring:
1540 udma_put_rflow(uc);
1541err_rflow:
1542 udma_put_rchan(uc);
1543
1544 return ret;
1545}
1546
1547#define TISCI_TCHAN_VALID_PARAMS ( \
1548 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1549 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1550 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1551 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1552 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1553 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1554 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1555 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1556
1557#define TISCI_RCHAN_VALID_PARAMS ( \
1558 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1559 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1560 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1561 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1562 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1563 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1564 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1565 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1566 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1567
1568static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1569{
1570 struct udma_dev *ud = uc->ud;
1571 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1572 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1573 struct udma_tchan *tchan = uc->tchan;
1574 struct udma_rchan *rchan = uc->rchan;
1575 int ret = 0;
1576
1577 /* Non synchronized - mem to mem type of transfer */
1578 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1579 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1580 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1581
1582 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1583 req_tx.nav_id = tisci_rm->tisci_dev_id;
1584 req_tx.index = tchan->id;
1585 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1586 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1587 req_tx.txcq_qnum = tc_ring;
1588 req_tx.tx_atype = ud->atype;
1589
1590 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1591 if (ret) {
1592 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1593 return ret;
1594 }
1595
1596 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1597 req_rx.nav_id = tisci_rm->tisci_dev_id;
1598 req_rx.index = rchan->id;
1599 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1600 req_rx.rxcq_qnum = tc_ring;
1601 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1602 req_rx.rx_atype = ud->atype;
1603
1604 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1605 if (ret)
1606 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1607
1608 return ret;
1609}
1610
1611static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1612{
1613 struct udma_dev *ud = uc->ud;
1614 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1615 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1616 struct udma_tchan *tchan = uc->tchan;
1617 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1618 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1619 u32 mode, fetch_size;
1620 int ret = 0;
1621
1622 if (uc->config.pkt_mode) {
1623 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1624 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1625 uc->config.psd_size, 0);
1626 } else {
1627 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1628 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1629 }
1630
1631 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1632 req_tx.nav_id = tisci_rm->tisci_dev_id;
1633 req_tx.index = tchan->id;
1634 req_tx.tx_chan_type = mode;
1635 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1636 req_tx.tx_fetch_size = fetch_size >> 2;
1637 req_tx.txcq_qnum = tc_ring;
1638 req_tx.tx_atype = uc->config.atype;
1639
1640 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1641 if (ret)
1642 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1643
1644 return ret;
1645}
1646
1647static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1648{
1649 struct udma_dev *ud = uc->ud;
1650 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1651 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1652 struct udma_rchan *rchan = uc->rchan;
1653 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1654 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1655 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1656 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1657 u32 mode, fetch_size;
1658 int ret = 0;
1659
1660 if (uc->config.pkt_mode) {
1661 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1662 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1663 uc->config.psd_size, 0);
1664 } else {
1665 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1666 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1667 }
1668
1669 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1670 req_rx.nav_id = tisci_rm->tisci_dev_id;
1671 req_rx.index = rchan->id;
1672 req_rx.rx_fetch_size = fetch_size >> 2;
1673 req_rx.rxcq_qnum = rx_ring;
1674 req_rx.rx_chan_type = mode;
1675 req_rx.rx_atype = uc->config.atype;
1676
1677 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1678 if (ret) {
1679 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1680 return ret;
1681 }
1682
1683 flow_req.valid_params =
1684 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1685 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1686 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1687 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1688 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1689 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1690 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1691 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1692 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1693 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1694 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1695 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1696 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1697
1698 flow_req.nav_id = tisci_rm->tisci_dev_id;
1699 flow_req.flow_index = rchan->id;
1700
1701 if (uc->config.needs_epib)
1702 flow_req.rx_einfo_present = 1;
1703 else
1704 flow_req.rx_einfo_present = 0;
1705 if (uc->config.psd_size)
1706 flow_req.rx_psinfo_present = 1;
1707 else
1708 flow_req.rx_psinfo_present = 0;
1709 flow_req.rx_error_handling = 1;
1710 flow_req.rx_dest_qnum = rx_ring;
1711 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1712 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1713 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1714 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1715 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1716 flow_req.rx_fdq1_qnum = fd_ring;
1717 flow_req.rx_fdq2_qnum = fd_ring;
1718 flow_req.rx_fdq3_qnum = fd_ring;
1719
1720 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1721
1722 if (ret)
1723 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1724
1725 return 0;
1726}
1727
1728static int udma_alloc_chan_resources(struct dma_chan *chan)
1729{
1730 struct udma_chan *uc = to_udma_chan(chan);
1731 struct udma_dev *ud = to_udma_dev(chan->device);
1732 const struct udma_match_data *match_data = ud->match_data;
1733 struct k3_ring *irq_ring;
1734 u32 irq_udma_idx;
1735 int ret;
1736
1737 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1738 uc->use_dma_pool = true;
1739 /* in case of MEM_TO_MEM we have maximum of two TRs */
1740 if (uc->config.dir == DMA_MEM_TO_MEM) {
1741 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1742 sizeof(struct cppi5_tr_type15_t), 2);
1743 uc->config.pkt_mode = false;
1744 }
1745 }
1746
1747 if (uc->use_dma_pool) {
1748 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1749 uc->config.hdesc_size,
1750 ud->desc_align,
1751 0);
1752 if (!uc->hdesc_pool) {
1753 dev_err(ud->ddev.dev,
1754 "Descriptor pool allocation failed\n");
1755 uc->use_dma_pool = false;
1756 return -ENOMEM;
1757 }
1758 }
1759
1760 /*
1761 * Make sure that the completion is in a known state:
1762 * No teardown, the channel is idle
1763 */
1764 reinit_completion(&uc->teardown_completed);
1765 complete_all(&uc->teardown_completed);
1766 uc->state = UDMA_CHAN_IS_IDLE;
1767
1768 switch (uc->config.dir) {
1769 case DMA_MEM_TO_MEM:
1770 /* Non synchronized - mem to mem type of transfer */
1771 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1772 uc->id);
1773
1774 ret = udma_get_chan_pair(uc);
1775 if (ret)
1776 return ret;
1777
1778 ret = udma_alloc_tx_resources(uc);
1779 if (ret)
1780 return ret;
1781
1782 ret = udma_alloc_rx_resources(uc);
1783 if (ret) {
1784 udma_free_tx_resources(uc);
1785 return ret;
1786 }
1787
1788 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1789 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1790 K3_PSIL_DST_THREAD_ID_OFFSET;
1791
1792 irq_ring = uc->tchan->tc_ring;
1793 irq_udma_idx = uc->tchan->id;
1794
1795 ret = udma_tisci_m2m_channel_config(uc);
1796 break;
1797 case DMA_MEM_TO_DEV:
1798 /* Slave transfer synchronized - mem to dev (TX) transfer */
1799 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1800 uc->id);
1801
1802 ret = udma_alloc_tx_resources(uc);
1803 if (ret) {
1804 uc->config.remote_thread_id = -1;
1805 return ret;
1806 }
1807
1808 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1809 uc->config.dst_thread = uc->config.remote_thread_id;
1810 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1811
1812 irq_ring = uc->tchan->tc_ring;
1813 irq_udma_idx = uc->tchan->id;
1814
1815 ret = udma_tisci_tx_channel_config(uc);
1816 break;
1817 case DMA_DEV_TO_MEM:
1818 /* Slave transfer synchronized - dev to mem (RX) transfer */
1819 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1820 uc->id);
1821
1822 ret = udma_alloc_rx_resources(uc);
1823 if (ret) {
1824 uc->config.remote_thread_id = -1;
1825 return ret;
1826 }
1827
1828 uc->config.src_thread = uc->config.remote_thread_id;
1829 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1830 K3_PSIL_DST_THREAD_ID_OFFSET;
1831
1832 irq_ring = uc->rflow->r_ring;
1833 irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1834
1835 ret = udma_tisci_rx_channel_config(uc);
1836 break;
1837 default:
1838 /* Can not happen */
1839 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1840 __func__, uc->id, uc->config.dir);
1841 return -EINVAL;
1842 }
1843
1844 /* check if the channel configuration was successful */
1845 if (ret)
1846 goto err_res_free;
1847
1848 if (udma_is_chan_running(uc)) {
1849 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1850 udma_stop(uc);
1851 if (udma_is_chan_running(uc)) {
1852 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1853 goto err_res_free;
1854 }
1855 }
1856
1857 /* PSI-L pairing */
1858 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1859 if (ret) {
1860 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1861 uc->config.src_thread, uc->config.dst_thread);
1862 goto err_res_free;
1863 }
1864
1865 uc->psil_paired = true;
1866
1867 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1868 if (uc->irq_num_ring <= 0) {
1869 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1870 k3_ringacc_get_ring_id(irq_ring));
1871 ret = -EINVAL;
1872 goto err_psi_free;
1873 }
1874
1875 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1876 IRQF_TRIGGER_HIGH, uc->name, uc);
1877 if (ret) {
1878 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1879 goto err_irq_free;
1880 }
1881
1882 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1883 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1884 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1885 irq_udma_idx);
1886 if (uc->irq_num_udma <= 0) {
1887 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1888 irq_udma_idx);
1889 free_irq(uc->irq_num_ring, uc);
1890 ret = -EINVAL;
1891 goto err_irq_free;
1892 }
1893
1894 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1895 uc->name, uc);
1896 if (ret) {
1897 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1898 uc->id);
1899 free_irq(uc->irq_num_ring, uc);
1900 goto err_irq_free;
1901 }
1902 } else {
1903 uc->irq_num_udma = 0;
1904 }
1905
1906 udma_reset_rings(uc);
1907
1908 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
1909 udma_check_tx_completion);
1910 return 0;
1911
1912err_irq_free:
1913 uc->irq_num_ring = 0;
1914 uc->irq_num_udma = 0;
1915err_psi_free:
1916 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1917 uc->psil_paired = false;
1918err_res_free:
1919 udma_free_tx_resources(uc);
1920 udma_free_rx_resources(uc);
1921
1922 udma_reset_uchan(uc);
1923
1924 if (uc->use_dma_pool) {
1925 dma_pool_destroy(uc->hdesc_pool);
1926 uc->use_dma_pool = false;
1927 }
1928
1929 return ret;
1930}
1931
1932static int udma_slave_config(struct dma_chan *chan,
1933 struct dma_slave_config *cfg)
1934{
1935 struct udma_chan *uc = to_udma_chan(chan);
1936
1937 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1938
1939 return 0;
1940}
1941
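/*
 * Allocate a single CPPI5 TR descriptor capable of holding tr_count TRs of
 * tr_size bytes, either from the channel's dma_pool or from coherent memory,
 * and initialize its header (reload count, packet id, return ring).
 */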
1942static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1943 size_t tr_size, int tr_count,
1944 enum dma_transfer_direction dir)
1945{
1946 struct udma_hwdesc *hwdesc;
1947 struct cppi5_desc_hdr_t *tr_desc;
1948 struct udma_desc *d;
1949 u32 reload_count = 0;
1950 u32 ring_id;
1951
1952 switch (tr_size) {
1953 case 16:
1954 case 32:
1955 case 64:
1956 case 128:
1957 break;
1958 default:
1959 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1960 return NULL;
1961 }
1962
1963 /* We have only one descriptor containing multiple TRs */
1964 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1965 if (!d)
1966 return NULL;
1967
1968 d->sglen = tr_count;
1969
1970 d->hwdesc_count = 1;
1971 hwdesc = &d->hwdesc[0];
1972
1973 /* Allocate memory for DMA ring descriptor */
1974 if (uc->use_dma_pool) {
1975 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1976 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1977 GFP_NOWAIT,
1978 &hwdesc->cppi5_desc_paddr);
1979 } else {
1980 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1981 tr_count);
1982 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1983 uc->ud->desc_align);
1984 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1985 hwdesc->cppi5_desc_size,
1986 &hwdesc->cppi5_desc_paddr,
1987 GFP_NOWAIT);
1988 }
1989
1990 if (!hwdesc->cppi5_desc_vaddr) {
1991 kfree(d);
1992 return NULL;
1993 }
1994
1995 /* Start of the TR req records */
1996 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
1997 /* Start address of the TR response array */
1998 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
1999
2000 tr_desc = hwdesc->cppi5_desc_vaddr;
2001
2002 if (uc->cyclic)
2003 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2004
2005 if (dir == DMA_DEV_TO_MEM)
2006 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2007 else
2008 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2009
2010 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2011 cppi5_desc_set_pktids(tr_desc, uc->id,
2012 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2013 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2014
2015 return d;
2016}
2017
Peter Ujfalusia9793402020-02-14 11:14:38 +02002018/**
2019 * udma_get_tr_counters - calculate TR counters for a given length
2020 * @len: Length of the transfer
2021 * @align_to: Preferred alignment
2022 * @tr0_cnt0: First TR icnt0
2023 * @tr0_cnt1: First TR icnt1
2024 * @tr1_cnt0: Second (if used) TR icnt0
2025 *
2026 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2027 * For len >= SZ_64K two TRs are used in a simple way:
2028 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2029 * Second TR: the remaining length (tr1_cnt0)
2030 *
2031 * Returns the number of TRs the length needs (1 or 2)
2032 * -EINVAL if the length can not be supported
2033 */
2034static int udma_get_tr_counters(size_t len, unsigned long align_to,
2035 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2036{
2037 if (len < SZ_64K) {
2038 *tr0_cnt0 = len;
2039 *tr0_cnt1 = 1;
2040
2041 return 1;
2042 }
2043
2044 if (align_to > 3)
2045 align_to = 3;
2046
2047realign:
2048 *tr0_cnt0 = SZ_64K - BIT(align_to);
2049 if (len / *tr0_cnt0 >= SZ_64K) {
2050 if (align_to) {
2051 align_to--;
2052 goto realign;
2053 }
2054 return -EINVAL;
2055 }
2056
2057 *tr0_cnt1 = len / *tr0_cnt0;
2058 *tr1_cnt0 = len % *tr0_cnt0;
2059
2060 return 2;
2061}
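/*
 * Worked example for udma_get_tr_counters() (illustrative numbers only):
 * len = 200000 with a 4-byte aligned buffer (align_to = 2) gives
 * tr0_cnt0 = SZ_64K - BIT(2) = 65532, tr0_cnt1 = 3 and tr1_cnt0 = 3404, so
 * the first TR moves 3 * 65532 = 196596 bytes, the second TR the remaining
 * 3404 bytes, and the function returns 2.
 */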
2062
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002063static struct udma_desc *
2064udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2065 unsigned int sglen, enum dma_transfer_direction dir,
2066 unsigned long tx_flags, void *context)
2067{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002068 struct scatterlist *sgent;
2069 struct udma_desc *d;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002070 struct cppi5_tr_type1_t *tr_req = NULL;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002071 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002072 unsigned int i;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002073 size_t tr_size;
2074 int num_tr = 0;
2075 int tr_idx = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002076
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002077 if (!is_slave_direction(dir)) {
2078 dev_err(uc->ud->dev, "Only slave DMA transfers are supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002079 return NULL;
2080 }
2081
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002082 /* estimate the number of TRs we will need */
2083 for_each_sg(sgl, sgent, sglen, i) {
2084 if (sg_dma_len(sgent) < SZ_64K)
2085 num_tr++;
2086 else
2087 num_tr += 2;
2088 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002089
2090 /* Now allocate and setup the descriptor. */
2091 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002092 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002093 if (!d)
2094 return NULL;
2095
2096 d->sglen = sglen;
2097
2098 tr_req = d->hwdesc[0].tr_req_base;
2099 for_each_sg(sgl, sgent, sglen, i) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002100 dma_addr_t sg_addr = sg_dma_address(sgent);
2101
2102 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2103 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2104 if (num_tr < 0) {
2105 dev_err(uc->ud->dev, "size %u is not supported\n",
2106 sg_dma_len(sgent));
2107 udma_free_hwdesc(uc, d);
2108 kfree(d);
2109 return NULL;
2110 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002111
2112 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false,
2113 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2114 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2115
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002116 tr_req[tr_idx].addr = sg_addr;
2117 tr_req[tr_idx].icnt0 = tr0_cnt0;
2118 tr_req[tr_idx].icnt1 = tr0_cnt1;
2119 tr_req[tr_idx].dim1 = tr0_cnt0;
2120 tr_idx++;
2121
2122 if (num_tr == 2) {
2123 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2124 false, false,
2125 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2126 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2127 CPPI5_TR_CSF_SUPR_EVT);
2128
2129 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2130 tr_req[tr_idx].icnt0 = tr1_cnt0;
2131 tr_req[tr_idx].icnt1 = 1;
2132 tr_req[tr_idx].dim1 = tr1_cnt0;
2133 tr_idx++;
2134 }
2135
2136 d->residue += sg_dma_len(sgent);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002137 }
2138
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002139 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002140
2141 return d;
2142}
2143
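/*
 * Configure the static TR parameters used by PDMA endpoints
 * (PSIL_EP_PDMA_XY): elsize/elcnt describe one burst (element size and
 * count), bstcnt expresses the transfer (one period for cyclic) in bursts so
 * that the PDMA knows when to close the packet. Illustrative example
 * (assumed values): a non-cyclic MEM_TO_DEV transfer with a 2-byte bus width,
 * elcnt = 8 and residue = 4096 gives bstcnt = 4096 / (2 * 8) = 256; for
 * DEV_TO_MEM the result must also fit statictr_z_mask (4095 on AM654,
 * 16M - 1 on J721E).
 */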
2144static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2145 enum dma_slave_buswidth dev_width,
2146 u16 elcnt)
2147{
2148 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2149 return 0;
2150
2151 /* Bus width translates to the element size (ES) */
2152 switch (dev_width) {
2153 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2154 d->static_tr.elsize = 0;
2155 break;
2156 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2157 d->static_tr.elsize = 1;
2158 break;
2159 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2160 d->static_tr.elsize = 2;
2161 break;
2162 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2163 d->static_tr.elsize = 3;
2164 break;
2165 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2166 d->static_tr.elsize = 4;
2167 break;
2168 default: /* not reached */
2169 return -EINVAL;
2170 }
2171
2172 d->static_tr.elcnt = elcnt;
2173
2174 /*
2175 * PDMA must close the packet when the channel is in packet mode.
2176 * For TR mode, when the channel is not cyclic, we also need PDMA to close
2177 * the packet, otherwise the transfer will stall because PDMA holds on to
2178 * the data it has received from the peripheral.
2179 */
2180 if (uc->config.pkt_mode || !uc->cyclic) {
2181 unsigned int div = dev_width * elcnt;
2182
2183 if (uc->cyclic)
2184 d->static_tr.bstcnt = d->residue / d->sglen / div;
2185 else
2186 d->static_tr.bstcnt = d->residue / div;
2187
2188 if (uc->config.dir == DMA_DEV_TO_MEM &&
2189 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2190 return -EINVAL;
2191 } else {
2192 d->static_tr.bstcnt = 0;
2193 }
2194
2195 return 0;
2196}
2197
2198static struct udma_desc *
2199udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2200 unsigned int sglen, enum dma_transfer_direction dir,
2201 unsigned long tx_flags, void *context)
2202{
2203 struct scatterlist *sgent;
2204 struct cppi5_host_desc_t *h_desc = NULL;
2205 struct udma_desc *d;
2206 u32 ring_id;
2207 unsigned int i;
2208
2209 d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2210 if (!d)
2211 return NULL;
2212
2213 d->sglen = sglen;
2214 d->hwdesc_count = sglen;
2215
2216 if (dir == DMA_DEV_TO_MEM)
2217 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2218 else
2219 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2220
2221 for_each_sg(sgl, sgent, sglen, i) {
2222 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2223 dma_addr_t sg_addr = sg_dma_address(sgent);
2224 struct cppi5_host_desc_t *desc;
2225 size_t sg_len = sg_dma_len(sgent);
2226
2227 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2228 GFP_NOWAIT,
2229 &hwdesc->cppi5_desc_paddr);
2230 if (!hwdesc->cppi5_desc_vaddr) {
2231 dev_err(uc->ud->dev,
2232 "descriptor%d allocation failed\n", i);
2233
2234 udma_free_hwdesc(uc, d);
2235 kfree(d);
2236 return NULL;
2237 }
2238
2239 d->residue += sg_len;
2240 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2241 desc = hwdesc->cppi5_desc_vaddr;
2242
2243 if (i == 0) {
2244 cppi5_hdesc_init(desc, 0, 0);
2245 /* Flow and Packet ID */
2246 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2247 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2248 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2249 } else {
2250 cppi5_hdesc_reset_hbdesc(desc);
2251 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2252 }
2253
2254 /* attach the sg buffer to the descriptor */
2255 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2256
2257 /* Attach link as host buffer descriptor */
2258 if (h_desc)
2259 cppi5_hdesc_link_hbdesc(h_desc,
2260 hwdesc->cppi5_desc_paddr);
2261
2262 if (dir == DMA_MEM_TO_DEV)
2263 h_desc = desc;
2264 }
2265
2266 if (d->residue >= SZ_4M) {
2267 dev_err(uc->ud->dev,
2268 "%s: Transfer size %u is over the supported 4M range\n",
2269 __func__, d->residue);
2270 udma_free_hwdesc(uc, d);
2271 kfree(d);
2272 return NULL;
2273 }
2274
2275 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2276 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2277
2278 return d;
2279}
2280
2281static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2282 void *data, size_t len)
2283{
2284 struct udma_desc *d = to_udma_desc(desc);
2285 struct udma_chan *uc = to_udma_chan(desc->chan);
2286 struct cppi5_host_desc_t *h_desc;
2287 u32 psd_size = len;
2288 u32 flags = 0;
2289
2290 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2291 return -ENOTSUPP;
2292
2293 if (!data || len > uc->config.metadata_size)
2294 return -EINVAL;
2295
2296 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2297 return -EINVAL;
2298
2299 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2300 if (d->dir == DMA_MEM_TO_DEV)
2301 memcpy(h_desc->epib, data, len);
2302
2303 if (uc->config.needs_epib)
2304 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2305
2306 d->metadata = data;
2307 d->metadata_size = len;
2308 if (uc->config.needs_epib)
2309 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2310
2311 cppi5_hdesc_update_flags(h_desc, flags);
2312 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2313
2314 return 0;
2315}
2316
2317static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2318 size_t *payload_len, size_t *max_len)
2319{
2320 struct udma_desc *d = to_udma_desc(desc);
2321 struct udma_chan *uc = to_udma_chan(desc->chan);
2322 struct cppi5_host_desc_t *h_desc;
2323
2324 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2325 return ERR_PTR(-ENOTSUPP);
2326
2327 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2328
2329 *max_len = uc->config.metadata_size;
2330
2331 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2332 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2333 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2334
2335 return h_desc->epib;
2336}
2337
2338static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2339 size_t payload_len)
2340{
2341 struct udma_desc *d = to_udma_desc(desc);
2342 struct udma_chan *uc = to_udma_chan(desc->chan);
2343 struct cppi5_host_desc_t *h_desc;
2344 u32 psd_size = payload_len;
2345 u32 flags = 0;
2346
2347 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2348 return -ENOTSUPP;
2349
2350 if (payload_len > uc->config.metadata_size)
2351 return -EINVAL;
2352
2353 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2354 return -EINVAL;
2355
2356 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2357
2358 if (uc->config.needs_epib) {
2359 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2360 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2361 }
2362
2363 cppi5_hdesc_update_flags(h_desc, flags);
2364 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2365
2366 return 0;
2367}
2368
2369static struct dma_descriptor_metadata_ops metadata_ops = {
2370 .attach = udma_attach_metadata,
2371 .get_ptr = udma_get_metadata_ptr,
2372 .set_len = udma_set_metadata_len,
2373};
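/*
 * Editorial note: clients reach the callbacks above through the generic
 * dmaengine metadata helpers, roughly as sketched below (client-side code,
 * not part of this driver; error handling omitted):
 *
 *	// DESC_METADATA_CLIENT: the client owns the metadata buffer
 *	dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *
 *	// DESC_METADATA_ENGINE: work on the descriptor's own EPIB/psdata area
 *	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
 *	// ... fill or parse up to max_len bytes at ptr ...
 *	dmaengine_desc_set_metadata_len(desc, new_len);
 */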
2374
2375static struct dma_async_tx_descriptor *
2376udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2377 unsigned int sglen, enum dma_transfer_direction dir,
2378 unsigned long tx_flags, void *context)
2379{
2380 struct udma_chan *uc = to_udma_chan(chan);
2381 enum dma_slave_buswidth dev_width;
2382 struct udma_desc *d;
2383 u32 burst;
2384
2385 if (dir != uc->config.dir) {
2386 dev_err(chan->device->dev,
2387 "%s: chan%d is for %s, not supporting %s\n",
2388 __func__, uc->id,
2389 dmaengine_get_direction_text(uc->config.dir),
2390 dmaengine_get_direction_text(dir));
2391 return NULL;
2392 }
2393
2394 if (dir == DMA_DEV_TO_MEM) {
2395 dev_width = uc->cfg.src_addr_width;
2396 burst = uc->cfg.src_maxburst;
2397 } else if (dir == DMA_MEM_TO_DEV) {
2398 dev_width = uc->cfg.dst_addr_width;
2399 burst = uc->cfg.dst_maxburst;
2400 } else {
2401 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2402 return NULL;
2403 }
2404
2405 if (!burst)
2406 burst = 1;
2407
2408 if (uc->config.pkt_mode)
2409 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2410 context);
2411 else
2412 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2413 context);
2414
2415 if (!d)
2416 return NULL;
2417
2418 d->dir = dir;
2419 d->desc_idx = 0;
2420 d->tr_idx = 0;
2421
2422 /* static TR for remote PDMA */
2423 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2424 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002425 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002426 __func__, d->static_tr.bstcnt);
2427
2428 udma_free_hwdesc(uc, d);
2429 kfree(d);
2430 return NULL;
2431 }
2432
2433 if (uc->config.metadata_size)
2434 d->vd.tx.metadata_ops = &metadata_ops;
2435
2436 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2437}
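/*
 * Editorial note: a typical slave client exercises the prep callback above
 * through the generic dmaengine API, roughly as follows (client-side sketch,
 * names illustrative, error handling omitted):
 *
 *	chan = dma_request_chan(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);	// stored by udma_slave_config()
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = done_cb;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */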
2438
2439static struct udma_desc *
2440udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2441 size_t buf_len, size_t period_len,
2442 enum dma_transfer_direction dir, unsigned long flags)
2443{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002444 struct udma_desc *d;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002445 size_t tr_size, period_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002446 struct cppi5_tr_type1_t *tr_req;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002447 unsigned int periods = buf_len / period_len;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002448 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2449 unsigned int i;
2450 int num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002451
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002452 if (!is_slave_direction(dir)) {
2453 dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002454 return NULL;
2455 }
2456
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002457 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2458 &tr0_cnt1, &tr1_cnt0);
2459 if (num_tr < 0) {
2460 dev_err(uc->ud->dev, "size %zu is not supported\n",
2461 period_len);
2462 return NULL;
2463 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002464
2465 /* Now allocate and setup the descriptor. */
2466 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002467 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002468 if (!d)
2469 return NULL;
2470
2471 tr_req = d->hwdesc[0].tr_req_base;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002472 period_addr = buf_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002473 for (i = 0; i < periods; i++) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002474 int tr_idx = i * num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002475
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002476 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2477 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2478
2479 tr_req[tr_idx].addr = period_addr;
2480 tr_req[tr_idx].icnt0 = tr0_cnt0;
2481 tr_req[tr_idx].icnt1 = tr0_cnt1;
2482 tr_req[tr_idx].dim1 = tr0_cnt0;
2483
2484 if (num_tr == 2) {
2485 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2486 CPPI5_TR_CSF_SUPR_EVT);
2487 tr_idx++;
2488
2489 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2490 false, false,
2491 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2492
2493 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2494 tr_req[tr_idx].icnt0 = tr1_cnt0;
2495 tr_req[tr_idx].icnt1 = 1;
2496 tr_req[tr_idx].dim1 = tr1_cnt0;
2497 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002498
2499 if (!(flags & DMA_PREP_INTERRUPT))
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002500 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002501 CPPI5_TR_CSF_SUPR_EVT);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002502
2503 period_addr += period_len;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002504 }
2505
2506 return d;
2507}
2508
2509static struct udma_desc *
2510udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2511 size_t buf_len, size_t period_len,
2512 enum dma_transfer_direction dir, unsigned long flags)
2513{
2514 struct udma_desc *d;
2515 u32 ring_id;
2516 int i;
2517 int periods = buf_len / period_len;
2518
2519 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2520 return NULL;
2521
2522 if (period_len >= SZ_4M)
2523 return NULL;
2524
2525 d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2526 if (!d)
2527 return NULL;
2528
2529 d->hwdesc_count = periods;
2530
2531 /* TODO: re-check this... */
2532 if (dir == DMA_DEV_TO_MEM)
2533 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2534 else
2535 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2536
2537 for (i = 0; i < periods; i++) {
2538 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2539 dma_addr_t period_addr = buf_addr + (period_len * i);
2540 struct cppi5_host_desc_t *h_desc;
2541
2542 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2543 GFP_NOWAIT,
2544 &hwdesc->cppi5_desc_paddr);
2545 if (!hwdesc->cppi5_desc_vaddr) {
2546 dev_err(uc->ud->dev,
2547 "descriptor%d allocation failed\n", i);
2548
2549 udma_free_hwdesc(uc, d);
2550 kfree(d);
2551 return NULL;
2552 }
2553
2554 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2555 h_desc = hwdesc->cppi5_desc_vaddr;
2556
2557 cppi5_hdesc_init(h_desc, 0, 0);
2558 cppi5_hdesc_set_pktlen(h_desc, period_len);
2559
2560 /* Flow and Packed ID */
2561 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2562 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2563 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2564
2565 /* attach each period to a new descriptor */
2566 cppi5_hdesc_attach_buf(h_desc,
2567 period_addr, period_len,
2568 period_addr, period_len);
2569 }
2570
2571 return d;
2572}
2573
2574static struct dma_async_tx_descriptor *
2575udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2576 size_t period_len, enum dma_transfer_direction dir,
2577 unsigned long flags)
2578{
2579 struct udma_chan *uc = to_udma_chan(chan);
2580 enum dma_slave_buswidth dev_width;
2581 struct udma_desc *d;
2582 u32 burst;
2583
2584 if (dir != uc->config.dir) {
2585 dev_err(chan->device->dev,
2586 "%s: chan%d is for %s, not supporting %s\n",
2587 __func__, uc->id,
2588 dmaengine_get_direction_text(uc->config.dir),
2589 dmaengine_get_direction_text(dir));
2590 return NULL;
2591 }
2592
2593 uc->cyclic = true;
2594
2595 if (dir == DMA_DEV_TO_MEM) {
2596 dev_width = uc->cfg.src_addr_width;
2597 burst = uc->cfg.src_maxburst;
2598 } else if (dir == DMA_MEM_TO_DEV) {
2599 dev_width = uc->cfg.dst_addr_width;
2600 burst = uc->cfg.dst_maxburst;
2601 } else {
2602 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2603 return NULL;
2604 }
2605
2606 if (!burst)
2607 burst = 1;
2608
2609 if (uc->config.pkt_mode)
2610 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2611 dir, flags);
2612 else
2613 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2614 dir, flags);
2615
2616 if (!d)
2617 return NULL;
2618
2619 d->sglen = buf_len / period_len;
2620
2621 d->dir = dir;
2622 d->residue = buf_len;
2623
2624 /* static TR for remote PDMA */
2625 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2626 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002627 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002628 __func__, d->static_tr.bstcnt);
2629
2630 udma_free_hwdesc(uc, d);
2631 kfree(d);
2632 return NULL;
2633 }
2634
2635 if (uc->config.metadata_size)
2636 d->vd.tx.metadata_ops = &metadata_ops;
2637
2638 return vchan_tx_prep(&uc->vc, &d->vd, flags);
2639}
2640
2641static struct dma_async_tx_descriptor *
2642udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2643 size_t len, unsigned long tx_flags)
2644{
2645 struct udma_chan *uc = to_udma_chan(chan);
2646 struct udma_desc *d;
2647 struct cppi5_tr_type15_t *tr_req;
2648 int num_tr;
2649 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2650 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2651
2652 if (uc->config.dir != DMA_MEM_TO_MEM) {
2653 dev_err(chan->device->dev,
2654 "%s: chan%d is for %s, not supporting %s\n",
2655 __func__, uc->id,
2656 dmaengine_get_direction_text(uc->config.dir),
2657 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2658 return NULL;
2659 }
2660
Peter Ujfalusia9793402020-02-14 11:14:38 +02002661 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2662 &tr0_cnt1, &tr1_cnt0);
2663 if (num_tr < 0) {
2664 dev_err(uc->ud->dev, "size %zu is not supported\n",
2665 len);
2666 return NULL;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002667 }
2668
2669 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2670 if (!d)
2671 return NULL;
2672
2673 d->dir = DMA_MEM_TO_MEM;
2674 d->desc_idx = 0;
2675 d->tr_idx = 0;
2676 d->residue = len;
2677
2678 tr_req = d->hwdesc[0].tr_req_base;
2679
2680 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2681 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2682 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2683
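 /*
 * TR0 is a two-dimensional transfer: icnt0 bytes per "row", icnt1 rows,
 * stepping dim1 (= icnt0) bytes between rows, i.e. it moves
 * tr0_cnt0 * tr0_cnt1 bytes; TR1 below copies the remaining tail, if any.
 */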
2684 tr_req[0].addr = src;
2685 tr_req[0].icnt0 = tr0_cnt0;
2686 tr_req[0].icnt1 = tr0_cnt1;
2687 tr_req[0].icnt2 = 1;
2688 tr_req[0].icnt3 = 1;
2689 tr_req[0].dim1 = tr0_cnt0;
2690
2691 tr_req[0].daddr = dest;
2692 tr_req[0].dicnt0 = tr0_cnt0;
2693 tr_req[0].dicnt1 = tr0_cnt1;
2694 tr_req[0].dicnt2 = 1;
2695 tr_req[0].dicnt3 = 1;
2696 tr_req[0].ddim1 = tr0_cnt0;
2697
2698 if (num_tr == 2) {
2699 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2700 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2701 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2702
2703 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2704 tr_req[1].icnt0 = tr1_cnt0;
2705 tr_req[1].icnt1 = 1;
2706 tr_req[1].icnt2 = 1;
2707 tr_req[1].icnt3 = 1;
2708
2709 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2710 tr_req[1].dicnt0 = tr1_cnt0;
2711 tr_req[1].dicnt1 = 1;
2712 tr_req[1].dicnt2 = 1;
2713 tr_req[1].dicnt3 = 1;
2714 }
2715
2716 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
2717
2718 if (uc->config.metadata_size)
2719 d->vd.tx.metadata_ops = &metadata_ops;
2720
2721 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2722}
2723
2724static void udma_issue_pending(struct dma_chan *chan)
2725{
2726 struct udma_chan *uc = to_udma_chan(chan);
2727 unsigned long flags;
2728
2729 spin_lock_irqsave(&uc->vc.lock, flags);
2730
2731 /* If we have something pending and no active descriptor, then */
2732 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2733 /*
2734 * start a descriptor if the channel is NOT [marked as
2735 * terminating _and_ it is still running (teardown has not
2736 * completed yet)].
2737 */
2738 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2739 udma_is_chan_running(uc)))
2740 udma_start(uc);
2741 }
2742
2743 spin_unlock_irqrestore(&uc->vc.lock, flags);
2744}
2745
2746static enum dma_status udma_tx_status(struct dma_chan *chan,
2747 dma_cookie_t cookie,
2748 struct dma_tx_state *txstate)
2749{
2750 struct udma_chan *uc = to_udma_chan(chan);
2751 enum dma_status ret;
2752 unsigned long flags;
2753
2754 spin_lock_irqsave(&uc->vc.lock, flags);
2755
2756 ret = dma_cookie_status(chan, cookie, txstate);
2757
Peter Ujfalusi83903182020-02-14 11:14:41 +02002758 if (!udma_is_chan_running(uc))
2759 ret = DMA_COMPLETE;
2760
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002761 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2762 ret = DMA_PAUSED;
2763
2764 if (ret == DMA_COMPLETE || !txstate)
2765 goto out;
2766
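 /*
 * Residue is estimated from the channel realtime byte counters: bcnt is
 * what UDMA itself has processed, peer_bcnt what the PDMA peer has seen;
 * their difference is the data still in flight ("delay", reported via
 * dma_set_in_flight_bytes()).
 */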
2767 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2768 u32 peer_bcnt = 0;
2769 u32 bcnt = 0;
2770 u32 residue = uc->desc->residue;
2771 u32 delay = 0;
2772
2773 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2774 bcnt = udma_tchanrt_read(uc->tchan,
2775 UDMA_TCHAN_RT_SBCNT_REG);
2776
2777 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2778 peer_bcnt = udma_tchanrt_read(uc->tchan,
2779 UDMA_TCHAN_RT_PEER_BCNT_REG);
2780
2781 if (bcnt > peer_bcnt)
2782 delay = bcnt - peer_bcnt;
2783 }
2784 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2785 bcnt = udma_rchanrt_read(uc->rchan,
2786 UDMA_RCHAN_RT_BCNT_REG);
2787
2788 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2789 peer_bcnt = udma_rchanrt_read(uc->rchan,
2790 UDMA_RCHAN_RT_PEER_BCNT_REG);
2791
2792 if (peer_bcnt > bcnt)
2793 delay = peer_bcnt - bcnt;
2794 }
2795 } else {
2796 bcnt = udma_tchanrt_read(uc->tchan,
2797 UDMA_TCHAN_RT_BCNT_REG);
2798 }
2799
2800 bcnt -= uc->bcnt;
2801 if (bcnt && !(bcnt % uc->desc->residue))
2802 residue = 0;
2803 else
2804 residue -= bcnt % uc->desc->residue;
2805
2806 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2807 ret = DMA_COMPLETE;
2808 delay = 0;
2809 }
2810
2811 dma_set_residue(txstate, residue);
2812 dma_set_in_flight_bytes(txstate, delay);
2813
2814 } else {
2815 ret = DMA_COMPLETE;
2816 }
2817
2818out:
2819 spin_unlock_irqrestore(&uc->vc.lock, flags);
2820 return ret;
2821}
2822
2823static int udma_pause(struct dma_chan *chan)
2824{
2825 struct udma_chan *uc = to_udma_chan(chan);
2826
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002827 /* pause the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002828 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002829 case DMA_DEV_TO_MEM:
2830 udma_rchanrt_update_bits(uc->rchan,
2831 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2832 UDMA_PEER_RT_EN_PAUSE,
2833 UDMA_PEER_RT_EN_PAUSE);
2834 break;
2835 case DMA_MEM_TO_DEV:
2836 udma_tchanrt_update_bits(uc->tchan,
2837 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2838 UDMA_PEER_RT_EN_PAUSE,
2839 UDMA_PEER_RT_EN_PAUSE);
2840 break;
2841 case DMA_MEM_TO_MEM:
2842 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2843 UDMA_CHAN_RT_CTL_PAUSE,
2844 UDMA_CHAN_RT_CTL_PAUSE);
2845 break;
2846 default:
2847 return -EINVAL;
2848 }
2849
2850 return 0;
2851}
2852
2853static int udma_resume(struct dma_chan *chan)
2854{
2855 struct udma_chan *uc = to_udma_chan(chan);
2856
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002857 /* resume the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002858 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002859 case DMA_DEV_TO_MEM:
2860 udma_rchanrt_update_bits(uc->rchan,
2861 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2862 UDMA_PEER_RT_EN_PAUSE, 0);
2863
2864 break;
2865 case DMA_MEM_TO_DEV:
2866 udma_tchanrt_update_bits(uc->tchan,
2867 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2868 UDMA_PEER_RT_EN_PAUSE, 0);
2869 break;
2870 case DMA_MEM_TO_MEM:
2871 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2872 UDMA_CHAN_RT_CTL_PAUSE, 0);
2873 break;
2874 default:
2875 return -EINVAL;
2876 }
2877
2878 return 0;
2879}
2880
2881static int udma_terminate_all(struct dma_chan *chan)
2882{
2883 struct udma_chan *uc = to_udma_chan(chan);
2884 unsigned long flags;
2885 LIST_HEAD(head);
2886
2887 spin_lock_irqsave(&uc->vc.lock, flags);
2888
2889 if (udma_is_chan_running(uc))
2890 udma_stop(uc);
2891
2892 if (uc->desc) {
2893 uc->terminated_desc = uc->desc;
2894 uc->desc = NULL;
2895 uc->terminated_desc->terminated = true;
2896 cancel_delayed_work(&uc->tx_drain.work);
2897 }
2898
2899 uc->paused = false;
2900
2901 vchan_get_all_descriptors(&uc->vc, &head);
2902 spin_unlock_irqrestore(&uc->vc.lock, flags);
2903 vchan_dma_desc_free_list(&uc->vc, &head);
2904
2905 return 0;
2906}
2907
2908static void udma_synchronize(struct dma_chan *chan)
2909{
2910 struct udma_chan *uc = to_udma_chan(chan);
2911 unsigned long timeout = msecs_to_jiffies(1000);
2912
2913 vchan_synchronize(&uc->vc);
2914
2915 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2916 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2917 timeout);
2918 if (!timeout) {
2919 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2920 uc->id);
2921 udma_dump_chan_stdata(uc);
2922 udma_reset_chan(uc, true);
2923 }
2924 }
2925
2926 udma_reset_chan(uc, false);
2927 if (udma_is_chan_running(uc))
2928 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2929
2930 cancel_delayed_work_sync(&uc->tx_drain.work);
2931 udma_reset_rings(uc);
2932}
2933
2934static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2935 struct virt_dma_desc *vd,
2936 struct dmaengine_result *result)
2937{
2938 struct udma_chan *uc = to_udma_chan(&vc->chan);
2939 struct udma_desc *d;
2940
2941 if (!vd)
2942 return;
2943
2944 d = to_udma_desc(&vd->tx);
2945
2946 if (d->metadata_size)
2947 udma_fetch_epib(uc, d);
2948
2949 /* Provide residue information for the client */
2950 if (result) {
2951 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2952
2953 if (cppi5_desc_get_type(desc_vaddr) ==
2954 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2955 result->residue = d->residue -
2956 cppi5_hdesc_get_pktlen(desc_vaddr);
2957 if (result->residue)
2958 result->result = DMA_TRANS_ABORTED;
2959 else
2960 result->result = DMA_TRANS_NOERROR;
2961 } else {
2962 result->residue = 0;
2963 result->result = DMA_TRANS_NOERROR;
2964 }
2965 }
2966}
2967
2968/*
2969 * This tasklet handles the completion of a DMA descriptor by
2970 * calling its callback and freeing it.
2971 */
2972static void udma_vchan_complete(unsigned long arg)
2973{
2974 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2975 struct virt_dma_desc *vd, *_vd;
2976 struct dmaengine_desc_callback cb;
2977 LIST_HEAD(head);
2978
2979 spin_lock_irq(&vc->lock);
2980 list_splice_tail_init(&vc->desc_completed, &head);
2981 vd = vc->cyclic;
2982 if (vd) {
2983 vc->cyclic = NULL;
2984 dmaengine_desc_get_callback(&vd->tx, &cb);
2985 } else {
2986 memset(&cb, 0, sizeof(cb));
2987 }
2988 spin_unlock_irq(&vc->lock);
2989
2990 udma_desc_pre_callback(vc, vd, NULL);
2991 dmaengine_desc_callback_invoke(&cb, NULL);
2992
2993 list_for_each_entry_safe(vd, _vd, &head, node) {
2994 struct dmaengine_result result;
2995
2996 dmaengine_desc_get_callback(&vd->tx, &cb);
2997
2998 list_del(&vd->node);
2999
3000 udma_desc_pre_callback(vc, vd, &result);
3001 dmaengine_desc_callback_invoke(&cb, &result);
3002
3003 vchan_vdesc_fini(vd);
3004 }
3005}
3006
3007static void udma_free_chan_resources(struct dma_chan *chan)
3008{
3009 struct udma_chan *uc = to_udma_chan(chan);
3010 struct udma_dev *ud = to_udma_dev(chan->device);
3011
3012 udma_terminate_all(chan);
3013 if (uc->terminated_desc) {
3014 udma_reset_chan(uc, false);
3015 udma_reset_rings(uc);
3016 }
3017
3018 cancel_delayed_work_sync(&uc->tx_drain.work);
3019 destroy_delayed_work_on_stack(&uc->tx_drain.work);
3020
3021 if (uc->irq_num_ring > 0) {
3022 free_irq(uc->irq_num_ring, uc);
3023
3024 uc->irq_num_ring = 0;
3025 }
3026 if (uc->irq_num_udma > 0) {
3027 free_irq(uc->irq_num_udma, uc);
3028
3029 uc->irq_num_udma = 0;
3030 }
3031
3032 /* Release PSI-L pairing */
3033 if (uc->psil_paired) {
3034 navss_psil_unpair(ud, uc->config.src_thread,
3035 uc->config.dst_thread);
3036 uc->psil_paired = false;
3037 }
3038
3039 vchan_free_chan_resources(&uc->vc);
3040 tasklet_kill(&uc->vc.task);
3041
3042 udma_free_tx_resources(uc);
3043 udma_free_rx_resources(uc);
3044 udma_reset_uchan(uc);
3045
3046 if (uc->use_dma_pool) {
3047 dma_pool_destroy(uc->hdesc_pool);
3048 uc->use_dma_pool = false;
3049 }
3050}
3051
3052static struct platform_driver udma_driver;
3053
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003054struct udma_filter_param {
3055 int remote_thread_id;
3056 u32 atype;
3057};
3058
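/*
 * dmaengine filter callback used by udma_of_xlate(): it takes the PSI-L
 * remote thread ID (and atype) parsed from the DT dma spec, derives the
 * transfer direction from the destination-thread bit and copies the matching
 * PSI-L endpoint configuration into the channel config.
 */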
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003059static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
3060{
3061 struct udma_chan_config *ucc;
3062 struct psil_endpoint_config *ep_config;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003063 struct udma_filter_param *filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003064 struct udma_chan *uc;
3065 struct udma_dev *ud;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003066
3067 if (chan->device->dev->driver != &udma_driver.driver)
3068 return false;
3069
3070 uc = to_udma_chan(chan);
3071 ucc = &uc->config;
3072 ud = uc->ud;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003073 filter_param = param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003074
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003075 if (filter_param->atype > 2) {
3076 dev_err(ud->dev, "Invalid channel atype: %u\n",
3077 filter_param->atype);
3078 return false;
3079 }
3080
3081 ucc->remote_thread_id = filter_param->remote_thread_id;
3082 ucc->atype = filter_param->atype;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003083
3084 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3085 ucc->dir = DMA_MEM_TO_DEV;
3086 else
3087 ucc->dir = DMA_DEV_TO_MEM;
3088
3089 ep_config = psil_get_ep_config(ucc->remote_thread_id);
3090 if (IS_ERR(ep_config)) {
3091 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3092 ucc->remote_thread_id);
3093 ucc->dir = DMA_MEM_TO_MEM;
3094 ucc->remote_thread_id = -1;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003095 ucc->atype = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003096 return false;
3097 }
3098
3099 ucc->pkt_mode = ep_config->pkt_mode;
3100 ucc->channel_tpl = ep_config->channel_tpl;
3101 ucc->notdpkt = ep_config->notdpkt;
3102 ucc->ep_type = ep_config->ep_type;
3103
3104 if (ucc->ep_type != PSIL_EP_NATIVE) {
3105 const struct udma_match_data *match_data = ud->match_data;
3106
3107 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3108 ucc->enable_acc32 = ep_config->pdma_acc32;
3109 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3110 ucc->enable_burst = ep_config->pdma_burst;
3111 }
3112
3113 ucc->needs_epib = ep_config->needs_epib;
3114 ucc->psd_size = ep_config->psd_size;
3115 ucc->metadata_size =
3116 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3117 ucc->psd_size;
3118
3119 if (ucc->pkt_mode)
3120 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3121 ucc->metadata_size, ud->desc_align);
3122
3123 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3124 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3125
3126 return true;
3127}
3128
3129static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3130 struct of_dma *ofdma)
3131{
3132 struct udma_dev *ud = ofdma->of_dma_data;
3133 dma_cap_mask_t mask = ud->ddev.cap_mask;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003134 struct udma_filter_param filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003135 struct dma_chan *chan;
3136
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003137 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003138 return NULL;
3139
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003140 filter_param.remote_thread_id = dma_spec->args[0];
3141 if (dma_spec->args_count == 2)
3142 filter_param.atype = dma_spec->args[1];
3143 else
3144 filter_param.atype = 0;
3145
3146 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3147 ofdma->of_node);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003148 if (!chan) {
3149 dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
3150 return ERR_PTR(-EINVAL);
3151 }
3152
3153 return chan;
3154}
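/*
 * Editorial note: the xlate above accepts one or two cells per dma spec;
 * cell 0 is the PSI-L remote thread ID (destination threads, i.e. MEM_TO_DEV,
 * have K3_PSIL_DST_THREAD_ID_OFFSET set), the optional cell 1 is the atype.
 * An illustrative consumer node (thread IDs are examples only):
 *
 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *	dma-names = "tx", "rx";
 */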
3155
3156static struct udma_match_data am654_main_data = {
3157 .psil_base = 0x1000,
3158 .enable_memcpy_support = true,
3159 .statictr_z_mask = GENMASK(11, 0),
3160 .rchan_oes_offset = 0x2000,
3161 .tpl_levels = 2,
3162 .level_start_idx = {
3163 [0] = 8, /* Normal channels */
3164 [1] = 0, /* High Throughput channels */
3165 },
3166};
3167
3168static struct udma_match_data am654_mcu_data = {
3169 .psil_base = 0x6000,
Peter Ujfalusia4e68852020-03-27 16:42:28 +02003170 .enable_memcpy_support = false,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003171 .statictr_z_mask = GENMASK(11, 0),
3172 .rchan_oes_offset = 0x2000,
3173 .tpl_levels = 2,
3174 .level_start_idx = {
3175 [0] = 2, /* Normal channels */
3176 [1] = 0, /* High Throughput channels */
3177 },
3178};
3179
3180static struct udma_match_data j721e_main_data = {
3181 .psil_base = 0x1000,
3182 .enable_memcpy_support = true,
3183 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3184 .statictr_z_mask = GENMASK(23, 0),
3185 .rchan_oes_offset = 0x400,
3186 .tpl_levels = 3,
3187 .level_start_idx = {
3188 [0] = 16, /* Normal channels */
3189 [1] = 4, /* High Throughput channels */
3190 [2] = 0, /* Ultra High Throughput channels */
3191 },
3192};
3193
3194static struct udma_match_data j721e_mcu_data = {
3195 .psil_base = 0x6000,
3196 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3197 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3198 .statictr_z_mask = GENMASK(23, 0),
3199 .rchan_oes_offset = 0x400,
3200 .tpl_levels = 2,
3201 .level_start_idx = {
3202 [0] = 2, /* Normal channels */
3203 [1] = 0, /* High Throughput channels */
3204 },
3205};
3206
3207static const struct of_device_id udma_of_match[] = {
3208 {
3209 .compatible = "ti,am654-navss-main-udmap",
3210 .data = &am654_main_data,
3211 },
3212 {
3213 .compatible = "ti,am654-navss-mcu-udmap",
3214 .data = &am654_mcu_data,
3215 }, {
3216 .compatible = "ti,j721e-navss-main-udmap",
3217 .data = &j721e_main_data,
3218 }, {
3219 .compatible = "ti,j721e-navss-mcu-udmap",
3220 .data = &j721e_mcu_data,
3221 },
3222 { /* Sentinel */ },
3223};
3224
3225static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3226{
3227 struct resource *res;
3228 int i;
3229
3230 for (i = 0; i < MMR_LAST; i++) {
3231 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3232 mmr_names[i]);
3233 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3234 if (IS_ERR(ud->mmrs[i]))
3235 return PTR_ERR(ud->mmrs[i]);
3236 }
3237
3238 return 0;
3239}
3240
3241static int udma_setup_resources(struct udma_dev *ud)
3242{
3243 struct device *dev = ud->dev;
3244 int ch_count, ret, i, j;
3245 u32 cap2, cap3;
3246 struct ti_sci_resource_desc *rm_desc;
3247 struct ti_sci_resource *rm_res, irq_res;
3248 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3249 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3250 "ti,sci-rm-range-rchan",
3251 "ti,sci-rm-range-rflow" };
3252
3253 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
3254 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
3255
3256 ud->rflow_cnt = cap3 & 0x3fff;
3257 ud->tchan_cnt = cap2 & 0x1ff;
3258 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
3259 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
3260 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3261
3262 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3263 sizeof(unsigned long), GFP_KERNEL);
3264 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3265 GFP_KERNEL);
3266 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3267 sizeof(unsigned long), GFP_KERNEL);
3268 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3269 GFP_KERNEL);
3270 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3271 sizeof(unsigned long),
3272 GFP_KERNEL);
3273 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3274 BITS_TO_LONGS(ud->rflow_cnt),
3275 sizeof(unsigned long),
3276 GFP_KERNEL);
3277 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3278 sizeof(unsigned long),
3279 GFP_KERNEL);
3280 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3281 GFP_KERNEL);
3282
3283 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3284 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3285 !ud->rflows || !ud->rflow_in_use)
3286 return -ENOMEM;
3287
3288 /*
3289 * RX flows with the same IDs as RX channels are reserved to be used
3290 * as default flows if remote HW can't generate flow_ids. Those
3291 * RX flows can be requested only explicitly by id.
3292 */
3293 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3294
3295 /* by default no GP rflows are assigned to Linux */
3296 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3297
3298 /* Get resource ranges from tisci */
3299 for (i = 0; i < RM_RANGE_LAST; i++)
3300 tisci_rm->rm_ranges[i] =
3301 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3302 tisci_rm->tisci_dev_id,
3303 (char *)range_names[i]);
3304
3305 /* tchan ranges */
3306 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3307 if (IS_ERR(rm_res)) {
3308 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3309 } else {
3310 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3311 for (i = 0; i < rm_res->sets; i++) {
3312 rm_desc = &rm_res->desc[i];
3313 bitmap_clear(ud->tchan_map, rm_desc->start,
3314 rm_desc->num);
3315 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3316 rm_desc->start, rm_desc->num);
3317 }
3318 }
3319 irq_res.sets = rm_res->sets;
3320
3321 /* rchan and matching default flow ranges */
3322 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3323 if (IS_ERR(rm_res)) {
3324 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3325 } else {
3326 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3327 for (i = 0; i < rm_res->sets; i++) {
3328 rm_desc = &rm_res->desc[i];
3329 bitmap_clear(ud->rchan_map, rm_desc->start,
3330 rm_desc->num);
3331 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3332 rm_desc->start, rm_desc->num);
3333 }
3334 }
3335
3336 irq_res.sets += rm_res->sets;
3337 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3338 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3339 for (i = 0; i < rm_res->sets; i++) {
3340 irq_res.desc[i].start = rm_res->desc[i].start;
3341 irq_res.desc[i].num = rm_res->desc[i].num;
3342 }
3343 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3344 for (j = 0; j < rm_res->sets; j++, i++) {
3345 irq_res.desc[i].start = rm_res->desc[j].start +
3346 ud->match_data->rchan_oes_offset;
3347 irq_res.desc[i].num = rm_res->desc[j].num;
3348 }
3349 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3350 kfree(irq_res.desc);
3351 if (ret) {
3352 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3353 return ret;
3354 }
3355
3356 /* GP rflow ranges */
3357 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3358 if (IS_ERR(rm_res)) {
3359 /* all gp flows are assigned exclusively to Linux */
3360 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3361 ud->rflow_cnt - ud->rchan_cnt);
3362 } else {
3363 for (i = 0; i < rm_res->sets; i++) {
3364 rm_desc = &rm_res->desc[i];
3365 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3366 rm_desc->num);
3367 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3368 rm_desc->start, rm_desc->num);
3369 }
3370 }
3371
3372 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3373 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3374 if (!ch_count)
3375 return -ENODEV;
3376
3377 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3378 GFP_KERNEL);
3379 if (!ud->channels)
3380 return -ENOMEM;
3381
3382 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3383 ch_count,
3384 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3385 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3386 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3387 ud->rflow_cnt));
3388
3389 return ch_count;
3390}
3391
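/*
 * Pre-build the resources used to flush an RX channel on teardown: a 1K
 * bounce buffer for the discarded data plus one TR-mode and one packet-mode
 * descriptor pointing at it, so stale data held by the remote peer can be
 * drained whichever mode the channel uses.
 */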
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003392static int udma_setup_rx_flush(struct udma_dev *ud)
3393{
3394 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3395 struct cppi5_desc_hdr_t *tr_desc;
3396 struct cppi5_tr_type1_t *tr_req;
3397 struct cppi5_host_desc_t *desc;
3398 struct device *dev = ud->dev;
3399 struct udma_hwdesc *hwdesc;
3400 size_t tr_size;
3401
3402 /* Allocate 1K buffer for discarded data on RX channel teardown */
3403 rx_flush->buffer_size = SZ_1K;
3404 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3405 GFP_KERNEL);
3406 if (!rx_flush->buffer_vaddr)
3407 return -ENOMEM;
3408
3409 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3410 rx_flush->buffer_size,
3411 DMA_TO_DEVICE);
3412 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3413 return -ENOMEM;
3414
3415 /* Set up descriptor to be used for TR mode */
3416 hwdesc = &rx_flush->hwdescs[0];
3417 tr_size = sizeof(struct cppi5_tr_type1_t);
3418 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3419 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3420 ud->desc_align);
3421
3422 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3423 GFP_KERNEL);
3424 if (!hwdesc->cppi5_desc_vaddr)
3425 return -ENOMEM;
3426
3427 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3428 hwdesc->cppi5_desc_size,
3429 DMA_TO_DEVICE);
3430 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3431 return -ENOMEM;
3432
3433 /* Start of the TR req records */
3434 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3435 /* Start address of the TR response array */
3436 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3437
3438 tr_desc = hwdesc->cppi5_desc_vaddr;
3439 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3440 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3441 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3442
3443 tr_req = hwdesc->tr_req_base;
3444 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3445 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3446 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3447
3448 tr_req->addr = rx_flush->buffer_paddr;
3449 tr_req->icnt0 = rx_flush->buffer_size;
3450 tr_req->icnt1 = 1;
3451
Peter Ujfalusi5bbeea32020-05-12 16:45:44 +03003452 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3453 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3454
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003455 /* Set up descriptor to be used for packet mode */
3456 hwdesc = &rx_flush->hwdescs[1];
3457 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3458 CPPI5_INFO0_HDESC_EPIB_SIZE +
3459 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3460 ud->desc_align);
3461
3462 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3463 GFP_KERNEL);
3464 if (!hwdesc->cppi5_desc_vaddr)
3465 return -ENOMEM;
3466
3467 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3468 hwdesc->cppi5_desc_size,
3469 DMA_TO_DEVICE);
3470 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3471 return -ENOMEM;
3472
3473 desc = hwdesc->cppi5_desc_vaddr;
3474 cppi5_hdesc_init(desc, 0, 0);
3475 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3476 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3477
3478 cppi5_hdesc_attach_buf(desc,
3479 rx_flush->buffer_paddr, rx_flush->buffer_size,
3480 rx_flush->buffer_paddr, rx_flush->buffer_size);
3481
3482 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3483 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3484 return 0;
3485}
3486
Peter Ujfalusidb8d9b42020-03-06 16:28:38 +02003487#ifdef CONFIG_DEBUG_FS
3488static void udma_dbg_summary_show_chan(struct seq_file *s,
3489 struct dma_chan *chan)
3490{
3491 struct udma_chan *uc = to_udma_chan(chan);
3492 struct udma_chan_config *ucc = &uc->config;
3493
3494 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
3495 chan->dbg_client_name ?: "in-use");
3496 seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
3497
3498 switch (uc->config.dir) {
3499 case DMA_MEM_TO_MEM:
3500 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
3501 ucc->src_thread, ucc->dst_thread);
3502 break;
3503 case DMA_DEV_TO_MEM:
3504 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
3505 ucc->src_thread, ucc->dst_thread);
3506 break;
3507 case DMA_MEM_TO_DEV:
3508 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
3509 ucc->src_thread, ucc->dst_thread);
3510 break;
3511 default:
3512 seq_printf(s, ")\n");
3513 return;
3514 }
3515
3516 if (ucc->ep_type == PSIL_EP_NATIVE) {
3517 seq_printf(s, "PSI-L Native");
3518 if (ucc->metadata_size) {
3519 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
3520 if (ucc->psd_size)
3521 seq_printf(s, " PSDsize:%u", ucc->psd_size);
3522 seq_printf(s, " ]");
3523 }
3524 } else {
3525 seq_printf(s, "PDMA");
3526 if (ucc->enable_acc32 || ucc->enable_burst)
3527 seq_printf(s, "[%s%s ]",
3528 ucc->enable_acc32 ? " ACC32" : "",
3529 ucc->enable_burst ? " BURST" : "");
3530 }
3531
3532 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
3533}
3534
3535static void udma_dbg_summary_show(struct seq_file *s,
3536 struct dma_device *dma_dev)
3537{
3538 struct dma_chan *chan;
3539
3540 list_for_each_entry(chan, &dma_dev->channels, device_node) {
3541 if (chan->client_count)
3542 udma_dbg_summary_show_chan(s, chan);
3543 }
3544}
3545#endif /* CONFIG_DEBUG_FS */
3546
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003547#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3548 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3549 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3550 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3551 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3552
3553static int udma_probe(struct platform_device *pdev)
3554{
3555 struct device_node *navss_node = pdev->dev.parent->of_node;
3556 struct device *dev = &pdev->dev;
3557 struct udma_dev *ud;
3558 const struct of_device_id *match;
3559 int i, ret;
3560 int ch_count;
3561
3562 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3563 if (ret)
3564 dev_err(dev, "failed to set the DMA mask\n");
3565
3566 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3567 if (!ud)
3568 return -ENOMEM;
3569
3570 ret = udma_get_mmrs(pdev, ud);
3571 if (ret)
3572 return ret;
3573
3574 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3575 if (IS_ERR(ud->tisci_rm.tisci))
3576 return PTR_ERR(ud->tisci_rm.tisci);
3577
3578 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3579 &ud->tisci_rm.tisci_dev_id);
3580 if (ret) {
3581 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3582 return ret;
3583 }
3584 pdev->id = ud->tisci_rm.tisci_dev_id;
3585
3586 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3587 &ud->tisci_rm.tisci_navss_dev_id);
3588 if (ret) {
3589 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3590 return ret;
3591 }
3592
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003593 ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
3594 if (!ret && ud->atype > 2) {
3595 dev_err(dev, "Invalid atype: %u\n", ud->atype);
3596 return -EINVAL;
3597 }
3598
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003599 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3600 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3601
3602 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3603 if (IS_ERR(ud->ringacc))
3604 return PTR_ERR(ud->ringacc);
3605
3606 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3607 DOMAIN_BUS_TI_SCI_INTA_MSI);
3608 if (!dev->msi_domain) {
3609 dev_err(dev, "Failed to get MSI domain\n");
3610 return -EPROBE_DEFER;
3611 }
3612
3613 match = of_match_node(udma_of_match, dev->of_node);
3614 if (!match) {
3615 dev_err(dev, "No compatible match found\n");
3616 return -ENODEV;
3617 }
3618 ud->match_data = match->data;
3619
3620 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3621 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3622
3623 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3624 ud->ddev.device_config = udma_slave_config;
3625 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3626 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3627 ud->ddev.device_issue_pending = udma_issue_pending;
3628 ud->ddev.device_tx_status = udma_tx_status;
3629 ud->ddev.device_pause = udma_pause;
3630 ud->ddev.device_resume = udma_resume;
3631 ud->ddev.device_terminate_all = udma_terminate_all;
3632 ud->ddev.device_synchronize = udma_synchronize;
Peter Ujfalusidb8d9b42020-03-06 16:28:38 +02003633#ifdef CONFIG_DEBUG_FS
3634 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
3635#endif
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003636
3637 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3638 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3639 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3640 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3641 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3642 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3643 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3644 DESC_METADATA_ENGINE;
3645 if (ud->match_data->enable_memcpy_support) {
3646 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3647 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3648 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3649 }
3650
3651 ud->ddev.dev = dev;
3652 ud->dev = dev;
3653 ud->psil_base = ud->match_data->psil_base;
3654
3655 INIT_LIST_HEAD(&ud->ddev.channels);
3656 INIT_LIST_HEAD(&ud->desc_to_purge);
3657
3658 ch_count = udma_setup_resources(ud);
3659 if (ch_count <= 0)
3660 return ch_count;
3661
3662 spin_lock_init(&ud->lock);
3663 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3664
3665 ud->desc_align = 64;
3666 if (ud->desc_align < dma_get_cache_alignment())
3667 ud->desc_align = dma_get_cache_alignment();
3668
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003669 ret = udma_setup_rx_flush(ud);
3670 if (ret)
3671 return ret;
3672
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003673 for (i = 0; i < ud->tchan_cnt; i++) {
3674 struct udma_tchan *tchan = &ud->tchans[i];
3675
3676 tchan->id = i;
3677 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3678 }
3679
3680 for (i = 0; i < ud->rchan_cnt; i++) {
3681 struct udma_rchan *rchan = &ud->rchans[i];
3682
3683 rchan->id = i;
3684 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3685 }
3686
3687 for (i = 0; i < ud->rflow_cnt; i++) {
3688 struct udma_rflow *rflow = &ud->rflows[i];
3689
3690 rflow->id = i;
3691 }
3692
3693 for (i = 0; i < ch_count; i++) {
3694 struct udma_chan *uc = &ud->channels[i];
3695
3696 uc->ud = ud;
3697 uc->vc.desc_free = udma_desc_free;
3698 uc->id = i;
3699 uc->tchan = NULL;
3700 uc->rchan = NULL;
3701 uc->config.remote_thread_id = -1;
3702 uc->config.dir = DMA_MEM_TO_MEM;
3703 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3704 dev_name(dev), i);
3705
3706 vchan_init(&uc->vc, &ud->ddev);
3707 /* Use custom vchan completion handling */
3708 tasklet_init(&uc->vc.task, udma_vchan_complete,
3709 (unsigned long)&uc->vc);
3710 init_completion(&uc->teardown_completed);
3711 }
3712
3713 ret = dma_async_device_register(&ud->ddev);
3714 if (ret) {
3715 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3716 return ret;
3717 }
3718
3719 platform_set_drvdata(pdev, ud);
3720
3721 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3722 if (ret) {
3723 dev_err(dev, "failed to register of_dma controller\n");
3724 dma_async_device_unregister(&ud->ddev);
3725 }
3726
3727 return ret;
3728}
3729
3730static struct platform_driver udma_driver = {
3731 .driver = {
3732 .name = "ti-udma",
3733 .of_match_table = udma_of_match,
3734 .suppress_bind_attrs = true,
3735 },
3736 .probe = udma_probe,
3737};
3738builtin_platform_driver(udma_driver);
Grygorii Strashkod7024192019-12-23 13:04:51 +02003739
3740/* Private interfaces to UDMA */
3741#include "k3-udma-private.c"