1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/delay.h>
9#include <linux/dmaengine.h>
10#include <linux/dma-mapping.h>
11#include <linux/dmapool.h>
12#include <linux/err.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/list.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_device.h>
22#include <linux/of_irq.h>
23#include <linux/workqueue.h>
24#include <linux/completion.h>
25#include <linux/soc/ti/k3-ringacc.h>
26#include <linux/soc/ti/ti_sci_protocol.h>
27#include <linux/soc/ti/ti_sci_inta_msi.h>
28#include <linux/dma/ti-cppi5.h>
29
30#include "../virt-dma.h"
31#include "k3-udma.h"
32#include "k3-psil-priv.h"
33
34struct udma_static_tr {
35 u8 elsize; /* RPSTR0 */
36 u16 elcnt; /* RPSTR0 */
37 u16 bstcnt; /* RPSTR1 */
38};
39
40#define K3_UDMA_MAX_RFLOWS 1024
41#define K3_UDMA_DEFAULT_RING_SIZE 16
42
43/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44#define UDMA_RFLOW_SRCTAG_NONE 0
45#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
46#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
47#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
48
49#define UDMA_RFLOW_DSTTAG_NONE 0
50#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
51#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
52#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
53#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
54
55struct udma_chan;
56
57enum udma_mmr {
58 MMR_GCFG = 0,
59 MMR_RCHANRT,
60 MMR_TCHANRT,
61 MMR_LAST,
62};
63
64static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
65
66struct udma_tchan {
67 void __iomem *reg_rt;
68
69 int id;
70 struct k3_ring *t_ring; /* Transmit ring */
71 struct k3_ring *tc_ring; /* Transmit Completion ring */
72};
73
74struct udma_rflow {
75 int id;
76 struct k3_ring *fd_ring; /* Free Descriptor ring */
77 struct k3_ring *r_ring; /* Receive ring */
78};
79
80struct udma_rchan {
81 void __iomem *reg_rt;
82
83 int id;
84};
85
86#define UDMA_FLAG_PDMA_ACC32 BIT(0)
87#define UDMA_FLAG_PDMA_BURST BIT(1)
88
89struct udma_match_data {
90 u32 psil_base;
91 bool enable_memcpy_support;
92 u32 flags;
93 u32 statictr_z_mask;
94 u32 rchan_oes_offset;
95
96 u8 tpl_levels;
97 u32 level_start_idx[];
98};
99
100struct udma_hwdesc {
101 size_t cppi5_desc_size;
102 void *cppi5_desc_vaddr;
103 dma_addr_t cppi5_desc_paddr;
104
105 /* TR descriptor internal pointers */
106 void *tr_req_base;
107 struct cppi5_tr_resp_t *tr_resp_base;
108};
109
110struct udma_rx_flush {
111 struct udma_hwdesc hwdescs[2];
112
113 size_t buffer_size;
114 void *buffer_vaddr;
115 dma_addr_t buffer_paddr;
116};
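/*
 * The rx_flush descriptors are not tied to any client transfer; they are
 * dummy descriptors pointing at a scratch buffer, pushed to the free
 * descriptor ring when an RX channel is being stopped so that data still in
 * flight has somewhere to land. hwdescs[0] is assumed to hold the TR-mode
 * variant and hwdescs[1] the packet-mode variant, so the right one can be
 * picked with the channel's pkt_mode flag as index (illustrative only):
 *
 *   paddr = ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
 */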
117
118struct udma_dev {
119 struct dma_device ddev;
120 struct device *dev;
121 void __iomem *mmrs[MMR_LAST];
122 const struct udma_match_data *match_data;
123
124 size_t desc_align; /* alignment to use for descriptors */
125
126 struct udma_tisci_rm tisci_rm;
127
128 struct k3_ringacc *ringacc;
129
130 struct work_struct purge_work;
131 struct list_head desc_to_purge;
132 spinlock_t lock;
133
134 struct udma_rx_flush rx_flush;
135
136 int tchan_cnt;
137 int echan_cnt;
138 int rchan_cnt;
139 int rflow_cnt;
140 unsigned long *tchan_map;
141 unsigned long *rchan_map;
142 unsigned long *rflow_gp_map;
143 unsigned long *rflow_gp_map_allocated;
144 unsigned long *rflow_in_use;
145
146 struct udma_tchan *tchans;
147 struct udma_rchan *rchans;
148 struct udma_rflow *rflows;
149
150 struct udma_chan *channels;
151 u32 psil_base;
152 u32 atype;
153};
154
155struct udma_desc {
156 struct virt_dma_desc vd;
157
158 bool terminated;
159
160 enum dma_transfer_direction dir;
161
162 struct udma_static_tr static_tr;
163 u32 residue;
164
165 unsigned int sglen;
166 unsigned int desc_idx; /* Only used for cyclic in packet mode */
167 unsigned int tr_idx;
168
169 u32 metadata_size;
170 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
171
172 unsigned int hwdesc_count;
173 struct udma_hwdesc hwdesc[0];
174};
175
176enum udma_chan_state {
177 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
178 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
179 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
180};
181
182struct udma_tx_drain {
183 struct delayed_work work;
184 ktime_t tstamp;
185 u32 residue;
186};
187
188struct udma_chan_config {
189 bool pkt_mode; /* TR or packet */
190 bool needs_epib; /* EPIB is needed for the communication or not */
191 u32 psd_size; /* size of Protocol Specific Data */
192 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
193 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
194 bool notdpkt; /* Suppress sending TDC packet */
195 int remote_thread_id;
196 u32 atype;
197 u32 src_thread;
198 u32 dst_thread;
199 enum psil_endpoint_type ep_type;
200 bool enable_acc32;
201 bool enable_burst;
202 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
203
204 enum dma_transfer_direction dir;
205};
206
207struct udma_chan {
208 struct virt_dma_chan vc;
209 struct dma_slave_config cfg;
210 struct udma_dev *ud;
211 struct udma_desc *desc;
212 struct udma_desc *terminated_desc;
213 struct udma_static_tr static_tr;
214 char *name;
215
216 struct udma_tchan *tchan;
217 struct udma_rchan *rchan;
218 struct udma_rflow *rflow;
219
220 bool psil_paired;
221
222 int irq_num_ring;
223 int irq_num_udma;
224
225 bool cyclic;
226 bool paused;
227
228 enum udma_chan_state state;
229 struct completion teardown_completed;
230
231 struct udma_tx_drain tx_drain;
232
233 u32 bcnt; /* number of bytes completed since the start of the channel */
234
235 /* Channel configuration parameters */
236 struct udma_chan_config config;
237
238 /* dmapool for packet mode descriptors */
239 bool use_dma_pool;
240 struct dma_pool *hdesc_pool;
241
242 u32 id;
243};
244
245static inline struct udma_dev *to_udma_dev(struct dma_device *d)
246{
247 return container_of(d, struct udma_dev, ddev);
248}
249
250static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
251{
252 return container_of(c, struct udma_chan, vc.chan);
253}
254
255static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
256{
257 return container_of(t, struct udma_desc, vd.tx);
258}
259
260/* Generic register access functions */
261static inline u32 udma_read(void __iomem *base, int reg)
262{
263 return readl(base + reg);
264}
265
266static inline void udma_write(void __iomem *base, int reg, u32 val)
267{
268 writel(val, base + reg);
269}
270
271static inline void udma_update_bits(void __iomem *base, int reg,
272 u32 mask, u32 val)
273{
274 u32 tmp, orig;
275
276 orig = readl(base + reg);
277 tmp = orig & ~mask;
278 tmp |= (val & mask);
279
280 if (tmp != orig)
281 writel(tmp, base + reg);
282}
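/*
 * Sketch of how the read-modify-write helper above is typically used: only
 * the bits in @mask are touched and the register is written back only when
 * the value actually changes. Pausing a TX channel without disturbing the
 * enable bit could look like this (illustrative only, not a call made here):
 *
 *   udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
 *                            UDMA_CHAN_RT_CTL_PAUSE,
 *                            UDMA_CHAN_RT_CTL_PAUSE);
 */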
283
284/* TCHANRT */
285static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
286{
287 if (!tchan)
288 return 0;
289 return udma_read(tchan->reg_rt, reg);
290}
291
292static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
293 u32 val)
294{
295 if (!tchan)
296 return;
297 udma_write(tchan->reg_rt, reg, val);
298}
299
300static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
301 u32 mask, u32 val)
302{
303 if (!tchan)
304 return;
305 udma_update_bits(tchan->reg_rt, reg, mask, val);
306}
307
308/* RCHANRT */
309static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
310{
311 if (!rchan)
312 return 0;
313 return udma_read(rchan->reg_rt, reg);
314}
315
316static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
317 u32 val)
318{
319 if (!rchan)
320 return;
321 udma_write(rchan->reg_rt, reg, val);
322}
323
324static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
325 u32 mask, u32 val)
326{
327 if (!rchan)
328 return;
329 udma_update_bits(rchan->reg_rt, reg, mask, val);
330}
331
332static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
333{
334 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
335
336 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
337 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
338 tisci_rm->tisci_navss_dev_id,
339 src_thread, dst_thread);
340}
341
342static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
343 u32 dst_thread)
344{
345 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
346
347 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
348 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
349 tisci_rm->tisci_navss_dev_id,
350 src_thread, dst_thread);
351}
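/*
 * PSI-L threads are identified by a 16-bit id; destination (receiving)
 * threads are distinguished by the bit that K3_PSIL_DST_THREAD_ID_OFFSET
 * ORs in above. A MEM_TO_DEV pairing therefore ends up roughly as below
 * (illustrative sketch, actual values depend on the SoC PSI-L map):
 *
 *   src_thread = ud->psil_base + uc->tchan->id;
 *   dst_thread = uc->config.remote_thread_id | K3_PSIL_DST_THREAD_ID_OFFSET;
 *   navss_psil_pair(ud, src_thread, dst_thread);
 */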
352
353static void udma_reset_uchan(struct udma_chan *uc)
354{
355 memset(&uc->config, 0, sizeof(uc->config));
356 uc->config.remote_thread_id = -1;
357 uc->state = UDMA_CHAN_IS_IDLE;
358}
359
360static void udma_dump_chan_stdata(struct udma_chan *uc)
361{
362 struct device *dev = uc->ud->dev;
363 u32 offset;
364 int i;
365
366 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
367 dev_dbg(dev, "TCHAN State data:\n");
368 for (i = 0; i < 32; i++) {
369 offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
370 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
371 udma_tchanrt_read(uc->tchan, offset));
372 }
373 }
374
375 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
376 dev_dbg(dev, "RCHAN State data:\n");
377 for (i = 0; i < 32; i++) {
378 offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
379 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
380 udma_rchanrt_read(uc->rchan, offset));
381 }
382 }
383}
384
385static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
386 int idx)
387{
388 return d->hwdesc[idx].cppi5_desc_paddr;
389}
390
391static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
392{
393 return d->hwdesc[idx].cppi5_desc_vaddr;
394}
395
396static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
397 dma_addr_t paddr)
398{
399 struct udma_desc *d = uc->terminated_desc;
400
401 if (d) {
402 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
403 d->desc_idx);
404
405 if (desc_paddr != paddr)
406 d = NULL;
407 }
408
409 if (!d) {
410 d = uc->desc;
411 if (d) {
412 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
413 d->desc_idx);
414
415 if (desc_paddr != paddr)
416 d = NULL;
417 }
418 }
419
420 return d;
421}
422
423static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
424{
425 if (uc->use_dma_pool) {
426 int i;
427
428 for (i = 0; i < d->hwdesc_count; i++) {
429 if (!d->hwdesc[i].cppi5_desc_vaddr)
430 continue;
431
432 dma_pool_free(uc->hdesc_pool,
433 d->hwdesc[i].cppi5_desc_vaddr,
434 d->hwdesc[i].cppi5_desc_paddr);
435
436 d->hwdesc[i].cppi5_desc_vaddr = NULL;
437 }
438 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
439 struct udma_dev *ud = uc->ud;
440
441 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
442 d->hwdesc[0].cppi5_desc_vaddr,
443 d->hwdesc[0].cppi5_desc_paddr);
444
445 d->hwdesc[0].cppi5_desc_vaddr = NULL;
446 }
447}
448
449static void udma_purge_desc_work(struct work_struct *work)
450{
451 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
452 struct virt_dma_desc *vd, *_vd;
453 unsigned long flags;
454 LIST_HEAD(head);
455
456 spin_lock_irqsave(&ud->lock, flags);
457 list_splice_tail_init(&ud->desc_to_purge, &head);
458 spin_unlock_irqrestore(&ud->lock, flags);
459
460 list_for_each_entry_safe(vd, _vd, &head, node) {
461 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
462 struct udma_desc *d = to_udma_desc(&vd->tx);
463
464 udma_free_hwdesc(uc, d);
465 list_del(&vd->node);
466 kfree(d);
467 }
468
469 /* If more to purge, schedule the work again */
470 if (!list_empty(&ud->desc_to_purge))
471 schedule_work(&ud->purge_work);
472}
473
474static void udma_desc_free(struct virt_dma_desc *vd)
475{
476 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
477 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
478 struct udma_desc *d = to_udma_desc(&vd->tx);
479 unsigned long flags;
480
481 if (uc->terminated_desc == d)
482 uc->terminated_desc = NULL;
483
484 if (uc->use_dma_pool) {
485 udma_free_hwdesc(uc, d);
486 kfree(d);
487 return;
488 }
489
490 spin_lock_irqsave(&ud->lock, flags);
491 list_add_tail(&vd->node, &ud->desc_to_purge);
492 spin_unlock_irqrestore(&ud->lock, flags);
493
494 schedule_work(&ud->purge_work);
495}
496
497static bool udma_is_chan_running(struct udma_chan *uc)
498{
499 u32 trt_ctl = 0;
500 u32 rrt_ctl = 0;
501
502 if (uc->tchan)
503 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
504 if (uc->rchan)
505 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
506
507 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
508 return true;
509
510 return false;
511}
512
513static bool udma_is_chan_paused(struct udma_chan *uc)
514{
515 u32 val, pause_mask;
516
517 switch (uc->config.dir) {
518 case DMA_DEV_TO_MEM:
519 val = udma_rchanrt_read(uc->rchan,
520 UDMA_RCHAN_RT_PEER_RT_EN_REG);
521 pause_mask = UDMA_PEER_RT_EN_PAUSE;
522 break;
523 case DMA_MEM_TO_DEV:
524 val = udma_tchanrt_read(uc->tchan,
525 UDMA_TCHAN_RT_PEER_RT_EN_REG);
526 pause_mask = UDMA_PEER_RT_EN_PAUSE;
527 break;
528 case DMA_MEM_TO_MEM:
529 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
530 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
531 break;
532 default:
533 return false;
534 }
535
536 if (val & pause_mask)
537 return true;
538
539 return false;
540}
541
542static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
543{
544 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
545}
546
547static int udma_push_to_ring(struct udma_chan *uc, int idx)
548{
549 struct udma_desc *d = uc->desc;
550 struct k3_ring *ring = NULL;
551 dma_addr_t paddr;
552
553 switch (uc->config.dir) {
554 case DMA_DEV_TO_MEM:
555 ring = uc->rflow->fd_ring;
556 break;
557 case DMA_MEM_TO_DEV:
558 case DMA_MEM_TO_MEM:
559 ring = uc->tchan->t_ring;
560 break;
561 default:
562 return -EINVAL;
563 }
564
565 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
566 if (idx == -1) {
567 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
568 } else {
569 paddr = udma_curr_cppi5_desc_paddr(d, idx);
570
571 wmb(); /* Ensure that writes are not moved over this point */
572 }
573
574 return k3_ringacc_ring_push(ring, &paddr);
575}
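/*
 * The wmb() above makes sure the CPU's writes to the descriptor memory are
 * visible before the DMA address is handed to the ring. The idx == -1 case
 * is assumed to be used only from the teardown path, e.g. when stopping a
 * DEV_TO_MEM channel that has no descriptor queued (as done in udma_stop()):
 *
 *   if (!uc->cyclic && !uc->desc)
 *           udma_push_to_ring(uc, -1);
 */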
576
577static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
578{
579 if (uc->config.dir != DMA_DEV_TO_MEM)
580 return false;
581
582 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
583 return true;
584
585 return false;
586}
587
588static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
589{
590 struct k3_ring *ring = NULL;
591 int ret;
592
593 switch (uc->config.dir) {
594 case DMA_DEV_TO_MEM:
595 ring = uc->rflow->r_ring;
596 break;
597 case DMA_MEM_TO_DEV:
598 case DMA_MEM_TO_MEM:
599 ring = uc->tchan->tc_ring;
600 break;
601 default:
602 return -ENOENT;
603 }
604
605 ret = k3_ringacc_ring_pop(ring, addr);
606 if (ret)
607 return ret;
608
609 rmb(); /* Ensure that reads are not moved before this point */
610
611 /* Teardown completion */
612 if (cppi5_desc_is_tdcm(*addr))
613 return 0;
614
615 /* Check for flush descriptor */
616 if (udma_desc_is_rx_flush(uc, *addr))
617 return -ENOENT;
618
619 return 0;
620}
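/*
 * Note on the return convention above: 0 with a valid address is a normal
 * completion, 0 with a TDCM address signals teardown (the caller is expected
 * to check cppi5_desc_is_tdcm() itself), and -ENOENT covers both an empty
 * ring and the internal RX flush descriptor, which must never be handed to a
 * client. A caller typically does something like (see the ring IRQ handler):
 *
 *   if (udma_pop_from_ring(uc, &paddr) || !paddr)
 *           return IRQ_HANDLED;
 */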
621
622static void udma_reset_rings(struct udma_chan *uc)
623{
624 struct k3_ring *ring1 = NULL;
625 struct k3_ring *ring2 = NULL;
626
627 switch (uc->config.dir) {
628 case DMA_DEV_TO_MEM:
629 if (uc->rchan) {
630 ring1 = uc->rflow->fd_ring;
631 ring2 = uc->rflow->r_ring;
632 }
633 break;
634 case DMA_MEM_TO_DEV:
635 case DMA_MEM_TO_MEM:
636 if (uc->tchan) {
637 ring1 = uc->tchan->t_ring;
638 ring2 = uc->tchan->tc_ring;
639 }
640 break;
641 default:
642 break;
643 }
644
645 if (ring1)
646 k3_ringacc_ring_reset_dma(ring1,
647 k3_ringacc_ring_get_occ(ring1));
648 if (ring2)
649 k3_ringacc_ring_reset(ring2);
650
651 /* make sure we are not leaking memory via a stalled descriptor */
652 if (uc->terminated_desc) {
653 udma_desc_free(&uc->terminated_desc->vd);
654 uc->terminated_desc = NULL;
655 }
656}
657
658static void udma_reset_counters(struct udma_chan *uc)
659{
660 u32 val;
661
662 if (uc->tchan) {
663 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
664 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
665
666 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
667 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
668
669 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
670 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
671
672 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
673 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
674 }
675
676 if (uc->rchan) {
677 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
678 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
679
680 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
681 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
682
683 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
684 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
685
686 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
687 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
688 }
689
690 uc->bcnt = 0;
691}
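/*
 * The RT byte/packet counters are assumed to decrement by the value written
 * to them, so reading a counter and writing the same value back (as done
 * above) effectively zeroes it without needing a dedicated clear register:
 *
 *   val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
 *   udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
 */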
692
693static int udma_reset_chan(struct udma_chan *uc, bool hard)
694{
695 switch (uc->config.dir) {
696 case DMA_DEV_TO_MEM:
697 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
698 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
699 break;
700 case DMA_MEM_TO_DEV:
701 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
702 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
703 break;
704 case DMA_MEM_TO_MEM:
705 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
706 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
707 break;
708 default:
709 return -EINVAL;
710 }
711
712 /* Reset all counters */
713 udma_reset_counters(uc);
714
715 /* Hard reset: re-initialize the channel to reset */
716 if (hard) {
717 struct udma_chan_config ucc_backup;
718 int ret;
719
720 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
721 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
722
723 /* restore the channel configuration */
724 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
725 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
726 if (ret)
727 return ret;
728
729 /*
730 * Setting forced teardown after forced reset helps recovering
731 * the rchan.
732 */
733 if (uc->config.dir == DMA_DEV_TO_MEM)
734 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
735 UDMA_CHAN_RT_CTL_EN |
736 UDMA_CHAN_RT_CTL_TDOWN |
737 UDMA_CHAN_RT_CTL_FTDOWN);
738 }
739 uc->state = UDMA_CHAN_IS_IDLE;
740
741 return 0;
742}
743
744static void udma_start_desc(struct udma_chan *uc)
745{
746 struct udma_chan_config *ucc = &uc->config;
747
748 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
749 int i;
750
751 /* Push all descriptors to ring for packet mode cyclic or RX */
752 for (i = 0; i < uc->desc->sglen; i++)
753 udma_push_to_ring(uc, i);
754 } else {
755 udma_push_to_ring(uc, 0);
756 }
757}
758
759static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
760{
761 /* Only PDMAs have staticTR */
762 if (uc->config.ep_type == PSIL_EP_NATIVE)
763 return false;
764
765 /* Check if the staticTR configuration has changed for TX */
766 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
767 return true;
768
769 return false;
770}
771
772static int udma_start(struct udma_chan *uc)
773{
774 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
775
776 if (!vd) {
777 uc->desc = NULL;
778 return -ENOENT;
779 }
780
781 list_del(&vd->node);
782
783 uc->desc = to_udma_desc(&vd->tx);
784
785 /* Channel is already running and does not need reconfiguration */
786 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
787 udma_start_desc(uc);
788 goto out;
789 }
790
791 /* Make sure that we clear the teardown bit, if it is set */
792 udma_reset_chan(uc, false);
793
794 /* Push descriptors before we start the channel */
795 udma_start_desc(uc);
796
797 switch (uc->desc->dir) {
798 case DMA_DEV_TO_MEM:
799 /* Config remote TR */
800 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
801 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
802 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
803 const struct udma_match_data *match_data =
804 uc->ud->match_data;
805
806 if (uc->config.enable_acc32)
807 val |= PDMA_STATIC_TR_XY_ACC32;
808 if (uc->config.enable_burst)
809 val |= PDMA_STATIC_TR_XY_BURST;
810
811 udma_rchanrt_write(uc->rchan,
812 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
813
814 udma_rchanrt_write(uc->rchan,
815 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
816 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
817 match_data->statictr_z_mask));
818
819 /* save the current staticTR configuration */
820 memcpy(&uc->static_tr, &uc->desc->static_tr,
821 sizeof(uc->static_tr));
822 }
823
824 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
825 UDMA_CHAN_RT_CTL_EN);
826
827 /* Enable remote */
828 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
829 UDMA_PEER_RT_EN_ENABLE);
830
831 break;
832 case DMA_MEM_TO_DEV:
833 /* Config remote TR */
834 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
835 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
836 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
837
838 if (uc->config.enable_acc32)
839 val |= PDMA_STATIC_TR_XY_ACC32;
840 if (uc->config.enable_burst)
841 val |= PDMA_STATIC_TR_XY_BURST;
842
843 udma_tchanrt_write(uc->tchan,
844 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
845
846 /* save the current staticTR configuration */
847 memcpy(&uc->static_tr, &uc->desc->static_tr,
848 sizeof(uc->static_tr));
849 }
850
851 /* Enable remote */
852 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
853 UDMA_PEER_RT_EN_ENABLE);
854
855 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
856 UDMA_CHAN_RT_CTL_EN);
857
858 break;
859 case DMA_MEM_TO_MEM:
860 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
861 UDMA_CHAN_RT_CTL_EN);
862 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
863 UDMA_CHAN_RT_CTL_EN);
864
865 break;
866 default:
867 return -EINVAL;
868 }
869
870 uc->state = UDMA_CHAN_IS_ACTIVE;
871out:
872
873 return 0;
874}
875
876static int udma_stop(struct udma_chan *uc)
877{
878 enum udma_chan_state old_state = uc->state;
879
880 uc->state = UDMA_CHAN_IS_TERMINATING;
881 reinit_completion(&uc->teardown_completed);
882
883 switch (uc->config.dir) {
884 case DMA_DEV_TO_MEM:
885 if (!uc->cyclic && !uc->desc)
886 udma_push_to_ring(uc, -1);
887
888 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
889 UDMA_PEER_RT_EN_ENABLE |
890 UDMA_PEER_RT_EN_TEARDOWN);
891 break;
892 case DMA_MEM_TO_DEV:
893 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
894 UDMA_PEER_RT_EN_ENABLE |
895 UDMA_PEER_RT_EN_FLUSH);
896 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
897 UDMA_CHAN_RT_CTL_EN |
898 UDMA_CHAN_RT_CTL_TDOWN);
899 break;
900 case DMA_MEM_TO_MEM:
901 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
902 UDMA_CHAN_RT_CTL_EN |
903 UDMA_CHAN_RT_CTL_TDOWN);
904 break;
905 default:
906 uc->state = old_state;
907 complete_all(&uc->teardown_completed);
908 return -EINVAL;
909 }
910
911 return 0;
912}
913
914static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
915{
916 struct udma_desc *d = uc->desc;
917 struct cppi5_host_desc_t *h_desc;
918
919 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
920 cppi5_hdesc_reset_to_original(h_desc);
921 udma_push_to_ring(uc, d->desc_idx);
922 d->desc_idx = (d->desc_idx + 1) % d->sglen;
923}
924
925static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
926{
927 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
928
929 memcpy(d->metadata, h_desc->epib, d->metadata_size);
930}
931
932static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
933{
934 u32 peer_bcnt, bcnt;
935
936 /* Only TX towards PDMA is affected */
937 if (uc->config.ep_type == PSIL_EP_NATIVE ||
938 uc->config.dir != DMA_MEM_TO_DEV)
939 return true;
940
941 peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
942 bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
943
944 /* Transfer is incomplete, store current residue and time stamp */
945 if (peer_bcnt < bcnt) {
946 uc->tx_drain.residue = bcnt - peer_bcnt;
947 uc->tx_drain.tstamp = ktime_get();
948 return false;
949 }
950
951 return true;
952}
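/*
 * For TX towards a PDMA peer the UDMA side can report completion while the
 * peer is still draining its FIFO, so the check above compares the bytes the
 * peer has consumed (PEER_BCNT) with the bytes UDMA has produced (BCNT). As
 * a rough illustration: with bcnt = 4096 and peer_bcnt = 1024 the residue is
 * 3072 bytes, and the delayed work below re-checks later, estimating the
 * wait time from the observed drain rate.
 */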
953
954static void udma_check_tx_completion(struct work_struct *work)
955{
956 struct udma_chan *uc = container_of(work, typeof(*uc),
957 tx_drain.work.work);
958 bool desc_done = true;
959 u32 residue_diff;
960 ktime_t time_diff;
961 unsigned long delay;
962
963 while (1) {
964 if (uc->desc) {
965 /* Get previous residue and time stamp */
966 residue_diff = uc->tx_drain.residue;
967 time_diff = uc->tx_drain.tstamp;
968 /*
969 * Get current residue and time stamp or see if
970 * transfer is complete
971 */
972 desc_done = udma_is_desc_really_done(uc, uc->desc);
973 }
974
975 if (!desc_done) {
976 /*
977 * Find the time delta and residue delta w.r.t
978 * previous poll
979 */
980 time_diff = ktime_sub(uc->tx_drain.tstamp,
981 time_diff) + 1;
982 residue_diff -= uc->tx_drain.residue;
983 if (residue_diff) {
984 /*
985 * Try to guess when we should check
986 * next time by calculating rate at
987 * which data is being drained at the
988 * peer device
989 */
990 delay = (time_diff / residue_diff) *
991 uc->tx_drain.residue;
992 } else {
993 /* No progress, check again in 1 second */
994 schedule_delayed_work(&uc->tx_drain.work, HZ);
995 break;
996 }
997
998 usleep_range(ktime_to_us(delay),
999 ktime_to_us(delay) + 10);
1000 continue;
1001 }
1002
1003 if (uc->desc) {
1004 struct udma_desc *d = uc->desc;
1005
1006 uc->bcnt += d->residue;
1007 udma_start(uc);
1008 vchan_cookie_complete(&d->vd);
1009 break;
1010 }
1011
1012 break;
1013 }
1014}
1015
1016static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1017{
1018 struct udma_chan *uc = data;
1019 struct udma_desc *d;
1020 unsigned long flags;
1021 dma_addr_t paddr = 0;
1022
1023 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1024 return IRQ_HANDLED;
1025
1026 spin_lock_irqsave(&uc->vc.lock, flags);
1027
1028 /* Teardown completion message */
1029 if (cppi5_desc_is_tdcm(paddr)) {
1030 complete_all(&uc->teardown_completed);
1031
1032 if (uc->terminated_desc) {
1033 udma_desc_free(&uc->terminated_desc->vd);
1034 uc->terminated_desc = NULL;
1035 }
1036
1037 if (!uc->desc)
1038 udma_start(uc);
1039
1040 goto out;
1041 }
1042
1043 d = udma_udma_desc_from_paddr(uc, paddr);
1044
1045 if (d) {
1046 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1047 d->desc_idx);
1048 if (desc_paddr != paddr) {
1049 dev_err(uc->ud->dev, "not matching descriptors!\n");
1050 goto out;
1051 }
1052
1053 if (d == uc->desc) {
1054 /* active descriptor */
1055 if (uc->cyclic) {
1056 udma_cyclic_packet_elapsed(uc);
1057 vchan_cyclic_callback(&d->vd);
1058 } else {
1059 if (udma_is_desc_really_done(uc, d)) {
1060 uc->bcnt += d->residue;
1061 udma_start(uc);
1062 vchan_cookie_complete(&d->vd);
1063 } else {
1064 schedule_delayed_work(&uc->tx_drain.work,
1065 0);
1066 }
1067 }
1068 } else {
1069 /*
1070 * terminated descriptor, mark the descriptor as
1071 * completed to update the channel's cookie marker
1072 */
1073 dma_cookie_complete(&d->vd.tx);
1074 }
1075 }
1076out:
1077 spin_unlock_irqrestore(&uc->vc.lock, flags);
1078
1079 return IRQ_HANDLED;
1080}
1081
1082static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1083{
1084 struct udma_chan *uc = data;
1085 struct udma_desc *d;
1086 unsigned long flags;
1087
1088 spin_lock_irqsave(&uc->vc.lock, flags);
1089 d = uc->desc;
1090 if (d) {
1091 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1092
1093 if (uc->cyclic) {
1094 vchan_cyclic_callback(&d->vd);
1095 } else {
1096 /* TODO: figure out the real amount of data */
1097 uc->bcnt += d->residue;
1098 udma_start(uc);
1099 vchan_cookie_complete(&d->vd);
1100 }
1101 }
1102
1103 spin_unlock_irqrestore(&uc->vc.lock, flags);
1104
1105 return IRQ_HANDLED;
1106}
1107
1108/**
1109 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1110 * @ud: UDMA device
1111 * @from: Start the search from this flow id number
1112 * @cnt: Number of consecutive flow ids to allocate
1113 *
1114 * Allocate a range of RX flow ids for future use. These flows can be requested
1115 * only by explicit flow id number. If @from is set to -1 it will try to find the
1116 * first free range. If @from is a positive value it will force allocation only
1117 * of the specified range of flows.
1118 *
1119 * Returns -ENOMEM if a free range can't be found,
1120 * -EEXIST if the requested range is busy,
1121 * -EINVAL if wrong input values are passed.
1122 * Returns flow id on success.
1123 */
1124static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1125{
1126 int start, tmp_from;
1127 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1128
1129 tmp_from = from;
1130 if (tmp_from < 0)
1131 tmp_from = ud->rchan_cnt;
1132 /* default flows can't be allocated; they are accessible only by id */
1133 if (tmp_from < ud->rchan_cnt)
1134 return -EINVAL;
1135
1136 if (tmp_from + cnt > ud->rflow_cnt)
1137 return -EINVAL;
1138
1139 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1140 ud->rflow_cnt);
1141
1142 start = bitmap_find_next_zero_area(tmp,
1143 ud->rflow_cnt,
1144 tmp_from, cnt, 0);
1145 if (start >= ud->rflow_cnt)
1146 return -ENOMEM;
1147
1148 if (from >= 0 && start != from)
1149 return -EEXIST;
1150
1151 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1152 return start;
1153}
1154
1155static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1156{
1157 if (from < ud->rchan_cnt)
1158 return -EINVAL;
1159 if (from + cnt > ud->rflow_cnt)
1160 return -EINVAL;
1161
1162 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1163 return 0;
1164}
1165
1166static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1167{
1168 /*
1169 * A request for an rflow by ID can be made for any rflow that is
1170 * not in use, on the assumption that the caller knows what it is
1171 * doing. TI-SCI FW will perform an additional permission check
1172 * anyway, so it is safe.
1173 */
1174
1175 if (id < 0 || id >= ud->rflow_cnt)
1176 return ERR_PTR(-ENOENT);
1177
1178 if (test_bit(id, ud->rflow_in_use))
1179 return ERR_PTR(-ENOENT);
1180
1181 /* GP rflow has to be allocated first */
1182 if (!test_bit(id, ud->rflow_gp_map) &&
1183 !test_bit(id, ud->rflow_gp_map_allocated))
1184 return ERR_PTR(-EINVAL);
1185
1186 dev_dbg(ud->dev, "get rflow%d\n", id);
1187 set_bit(id, ud->rflow_in_use);
1188 return &ud->rflows[id];
1189}
1190
1191static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1192{
1193 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1194 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1195 return;
1196 }
1197
1198 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1199 clear_bit(rflow->id, ud->rflow_in_use);
1200}
1201
1202#define UDMA_RESERVE_RESOURCE(res) \
1203static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1204 enum udma_tp_level tpl, \
1205 int id) \
1206{ \
1207 if (id >= 0) { \
1208 if (test_bit(id, ud->res##_map)) { \
1209 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1210 return ERR_PTR(-ENOENT); \
1211 } \
1212 } else { \
1213 int start; \
1214 \
1215 if (tpl >= ud->match_data->tpl_levels) \
1216 tpl = ud->match_data->tpl_levels - 1; \
1217 \
1218 start = ud->match_data->level_start_idx[tpl]; \
1219 \
1220 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1221 start); \
1222 if (id == ud->res##_cnt) { \
1223 return ERR_PTR(-ENOENT); \
1224 } \
1225 } \
1226 \
1227 set_bit(id, ud->res##_map); \
1228 return &ud->res##s[id]; \
1229}
1230
1231UDMA_RESERVE_RESOURCE(tchan);
1232UDMA_RESERVE_RESOURCE(rchan);
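/*
 * The macro above expands to __udma_reserve_tchan() and __udma_reserve_rchan()
 * with identical bodies. As a sketch, the tchan expansion behaves roughly as:
 *
 *   struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *                                           enum udma_tp_level tpl, int id)
 *   {
 *           // id >= 0: claim exactly that channel or fail with -ENOENT
 *           // id < 0:  search from the start index of the requested
 *           //          throughput level (clamped to the highest level)
 *   }
 */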
1233
1234static int udma_get_tchan(struct udma_chan *uc)
1235{
1236 struct udma_dev *ud = uc->ud;
1237
1238 if (uc->tchan) {
1239 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1240 uc->id, uc->tchan->id);
1241 return 0;
1242 }
1243
1244 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1245
1246 return PTR_ERR_OR_ZERO(uc->tchan);
1247}
1248
1249static int udma_get_rchan(struct udma_chan *uc)
1250{
1251 struct udma_dev *ud = uc->ud;
1252
1253 if (uc->rchan) {
1254 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1255 uc->id, uc->rchan->id);
1256 return 0;
1257 }
1258
1259 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1260
1261 return PTR_ERR_OR_ZERO(uc->rchan);
1262}
1263
1264static int udma_get_chan_pair(struct udma_chan *uc)
1265{
1266 struct udma_dev *ud = uc->ud;
1267 const struct udma_match_data *match_data = ud->match_data;
1268 int chan_id, end;
1269
1270 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1271 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1272 uc->id, uc->tchan->id);
1273 return 0;
1274 }
1275
1276 if (uc->tchan) {
1277 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1278 uc->id, uc->tchan->id);
1279 return -EBUSY;
1280 } else if (uc->rchan) {
1281 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1282 uc->id, uc->rchan->id);
1283 return -EBUSY;
1284 }
1285
1286 /* Can be optimized, but let's have it like this for now */
1287 end = min(ud->tchan_cnt, ud->rchan_cnt);
1288 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1289 chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
1290 for (; chan_id < end; chan_id++) {
1291 if (!test_bit(chan_id, ud->tchan_map) &&
1292 !test_bit(chan_id, ud->rchan_map))
1293 break;
1294 }
1295
1296 if (chan_id == end)
1297 return -ENOENT;
1298
1299 set_bit(chan_id, ud->tchan_map);
1300 set_bit(chan_id, ud->rchan_map);
1301 uc->tchan = &ud->tchans[chan_id];
1302 uc->rchan = &ud->rchans[chan_id];
1303
1304 return 0;
1305}
1306
1307static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1308{
1309 struct udma_dev *ud = uc->ud;
1310
1311 if (!uc->rchan) {
1312 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1313 return -EINVAL;
1314 }
1315
1316 if (uc->rflow) {
1317 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1318 uc->id, uc->rflow->id);
1319 return 0;
1320 }
1321
1322 uc->rflow = __udma_get_rflow(ud, flow_id);
1323
1324 return PTR_ERR_OR_ZERO(uc->rflow);
1325}
1326
1327static void udma_put_rchan(struct udma_chan *uc)
1328{
1329 struct udma_dev *ud = uc->ud;
1330
1331 if (uc->rchan) {
1332 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1333 uc->rchan->id);
1334 clear_bit(uc->rchan->id, ud->rchan_map);
1335 uc->rchan = NULL;
1336 }
1337}
1338
1339static void udma_put_tchan(struct udma_chan *uc)
1340{
1341 struct udma_dev *ud = uc->ud;
1342
1343 if (uc->tchan) {
1344 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1345 uc->tchan->id);
1346 clear_bit(uc->tchan->id, ud->tchan_map);
1347 uc->tchan = NULL;
1348 }
1349}
1350
1351static void udma_put_rflow(struct udma_chan *uc)
1352{
1353 struct udma_dev *ud = uc->ud;
1354
1355 if (uc->rflow) {
1356 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1357 uc->rflow->id);
1358 __udma_put_rflow(ud, uc->rflow);
1359 uc->rflow = NULL;
1360 }
1361}
1362
1363static void udma_free_tx_resources(struct udma_chan *uc)
1364{
1365 if (!uc->tchan)
1366 return;
1367
1368 k3_ringacc_ring_free(uc->tchan->t_ring);
1369 k3_ringacc_ring_free(uc->tchan->tc_ring);
1370 uc->tchan->t_ring = NULL;
1371 uc->tchan->tc_ring = NULL;
1372
1373 udma_put_tchan(uc);
1374}
1375
1376static int udma_alloc_tx_resources(struct udma_chan *uc)
1377{
1378 struct k3_ring_cfg ring_cfg;
1379 struct udma_dev *ud = uc->ud;
1380 int ret;
1381
1382 ret = udma_get_tchan(uc);
1383 if (ret)
1384 return ret;
1385
1386 uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
1387 uc->tchan->id, 0);
1388 if (!uc->tchan->t_ring) {
1389 ret = -EBUSY;
1390 goto err_tx_ring;
1391 }
1392
1393 uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1394 if (!uc->tchan->tc_ring) {
1395 ret = -EBUSY;
1396 goto err_txc_ring;
1397 }
1398
1399 memset(&ring_cfg, 0, sizeof(ring_cfg));
1400 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1401 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1402 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1403
1404 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1405 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1406
1407 if (ret)
1408 goto err_ringcfg;
1409
1410 return 0;
1411
1412err_ringcfg:
1413 k3_ringacc_ring_free(uc->tchan->tc_ring);
1414 uc->tchan->tc_ring = NULL;
1415err_txc_ring:
1416 k3_ringacc_ring_free(uc->tchan->t_ring);
1417 uc->tchan->t_ring = NULL;
1418err_tx_ring:
1419 udma_put_tchan(uc);
1420
1421 return ret;
1422}
1423
1424static void udma_free_rx_resources(struct udma_chan *uc)
1425{
1426 if (!uc->rchan)
1427 return;
1428
1429 if (uc->rflow) {
1430 struct udma_rflow *rflow = uc->rflow;
1431
1432 k3_ringacc_ring_free(rflow->fd_ring);
1433 k3_ringacc_ring_free(rflow->r_ring);
1434 rflow->fd_ring = NULL;
1435 rflow->r_ring = NULL;
1436
1437 udma_put_rflow(uc);
1438 }
1439
1440 udma_put_rchan(uc);
1441}
1442
1443static int udma_alloc_rx_resources(struct udma_chan *uc)
1444{
1445 struct udma_dev *ud = uc->ud;
1446 struct k3_ring_cfg ring_cfg;
1447 struct udma_rflow *rflow;
1448 int fd_ring_id;
1449 int ret;
1450
1451 ret = udma_get_rchan(uc);
1452 if (ret)
1453 return ret;
1454
1455 /* For MEM_TO_MEM we don't need rflow or rings */
1456 if (uc->config.dir == DMA_MEM_TO_MEM)
1457 return 0;
1458
1459 ret = udma_get_rflow(uc, uc->rchan->id);
1460 if (ret) {
1461 ret = -EBUSY;
1462 goto err_rflow;
1463 }
1464
1465 rflow = uc->rflow;
1466 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1467 rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
1468 if (!rflow->fd_ring) {
1469 ret = -EBUSY;
1470 goto err_rx_ring;
1471 }
1472
1473 rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1474 if (!rflow->r_ring) {
1475 ret = -EBUSY;
1476 goto err_rxc_ring;
1477 }
1478
1479 memset(&ring_cfg, 0, sizeof(ring_cfg));
1480
1481 if (uc->config.pkt_mode)
1482 ring_cfg.size = SG_MAX_SEGMENTS;
1483 else
1484 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1485
1486 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1487 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1488
1489 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1490 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1491 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1492
1493 if (ret)
1494 goto err_ringcfg;
1495
1496 return 0;
1497
1498err_ringcfg:
1499 k3_ringacc_ring_free(rflow->r_ring);
1500 rflow->r_ring = NULL;
1501err_rxc_ring:
1502 k3_ringacc_ring_free(rflow->fd_ring);
1503 rflow->fd_ring = NULL;
1504err_rx_ring:
1505 udma_put_rflow(uc);
1506err_rflow:
1507 udma_put_rchan(uc);
1508
1509 return ret;
1510}
1511
1512#define TISCI_TCHAN_VALID_PARAMS ( \
1513 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1514 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1515 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1516 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1517 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1518 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1519 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1520 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1521
1522#define TISCI_RCHAN_VALID_PARAMS ( \
1523 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1524 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1525 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1526 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1527 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1528 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1529 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1530 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1531 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1532
1533static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1534{
1535 struct udma_dev *ud = uc->ud;
1536 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1537 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1538 struct udma_tchan *tchan = uc->tchan;
1539 struct udma_rchan *rchan = uc->rchan;
1540 int ret = 0;
1541
1542 /* Non synchronized - mem to mem type of transfer */
1543 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1544 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1545 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1546
1547 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1548 req_tx.nav_id = tisci_rm->tisci_dev_id;
1549 req_tx.index = tchan->id;
1550 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1551 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1552 req_tx.txcq_qnum = tc_ring;
1553 req_tx.tx_atype = ud->atype;
1554
1555 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1556 if (ret) {
1557 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1558 return ret;
1559 }
1560
1561 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1562 req_rx.nav_id = tisci_rm->tisci_dev_id;
1563 req_rx.index = rchan->id;
1564 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1565 req_rx.rxcq_qnum = tc_ring;
1566 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1567 req_rx.rx_atype = ud->atype;
1568
1569 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1570 if (ret)
1571 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1572
1573 return ret;
1574}
1575
1576static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1577{
1578 struct udma_dev *ud = uc->ud;
1579 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1580 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1581 struct udma_tchan *tchan = uc->tchan;
1582 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1583 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1584 u32 mode, fetch_size;
1585 int ret = 0;
1586
1587 if (uc->config.pkt_mode) {
1588 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1589 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1590 uc->config.psd_size, 0);
1591 } else {
1592 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1593 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1594 }
1595
1596 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1597 req_tx.nav_id = tisci_rm->tisci_dev_id;
1598 req_tx.index = tchan->id;
1599 req_tx.tx_chan_type = mode;
1600 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1601 req_tx.tx_fetch_size = fetch_size >> 2;
1602 req_tx.txcq_qnum = tc_ring;
1603 req_tx.tx_atype = uc->config.atype;
1604
1605 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1606 if (ret)
1607 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1608
1609 return ret;
1610}
1611
1612static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1613{
1614 struct udma_dev *ud = uc->ud;
1615 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1616 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1617 struct udma_rchan *rchan = uc->rchan;
1618 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1619 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1620 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1621 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1622 u32 mode, fetch_size;
1623 int ret = 0;
1624
1625 if (uc->config.pkt_mode) {
1626 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1627 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1628 uc->config.psd_size, 0);
1629 } else {
1630 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1631 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1632 }
1633
1634 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1635 req_rx.nav_id = tisci_rm->tisci_dev_id;
1636 req_rx.index = rchan->id;
1637 req_rx.rx_fetch_size = fetch_size >> 2;
1638 req_rx.rxcq_qnum = rx_ring;
1639 req_rx.rx_chan_type = mode;
1640 req_rx.rx_atype = uc->config.atype;
1641
1642 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1643 if (ret) {
1644 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1645 return ret;
1646 }
1647
1648 flow_req.valid_params =
1649 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1650 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1651 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1652 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1653 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1654 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1655 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1656 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1657 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1658 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1659 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1660 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1661 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1662
1663 flow_req.nav_id = tisci_rm->tisci_dev_id;
1664 flow_req.flow_index = rchan->id;
1665
1666 if (uc->config.needs_epib)
1667 flow_req.rx_einfo_present = 1;
1668 else
1669 flow_req.rx_einfo_present = 0;
1670 if (uc->config.psd_size)
1671 flow_req.rx_psinfo_present = 1;
1672 else
1673 flow_req.rx_psinfo_present = 0;
1674 flow_req.rx_error_handling = 1;
1675 flow_req.rx_dest_qnum = rx_ring;
1676 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1677 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1678 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1679 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1680 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1681 flow_req.rx_fdq1_qnum = fd_ring;
1682 flow_req.rx_fdq2_qnum = fd_ring;
1683 flow_req.rx_fdq3_qnum = fd_ring;
1684
1685 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1686
1687 if (ret)
1688 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1689
1690 return 0;
1691}
1692
1693static int udma_alloc_chan_resources(struct dma_chan *chan)
1694{
1695 struct udma_chan *uc = to_udma_chan(chan);
1696 struct udma_dev *ud = to_udma_dev(chan->device);
1697 const struct udma_match_data *match_data = ud->match_data;
1698 struct k3_ring *irq_ring;
1699 u32 irq_udma_idx;
1700 int ret;
1701
1702 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1703 uc->use_dma_pool = true;
1704 /* in case of MEM_TO_MEM we have maximum of two TRs */
1705 if (uc->config.dir == DMA_MEM_TO_MEM) {
1706 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1707 sizeof(struct cppi5_tr_type15_t), 2);
1708 uc->config.pkt_mode = false;
1709 }
1710 }
1711
1712 if (uc->use_dma_pool) {
1713 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1714 uc->config.hdesc_size,
1715 ud->desc_align,
1716 0);
1717 if (!uc->hdesc_pool) {
1718 dev_err(ud->ddev.dev,
1719 "Descriptor pool allocation failed\n");
1720 uc->use_dma_pool = false;
1721 return -ENOMEM;
1722 }
1723 }
1724
1725 /*
1726 * Make sure that the completion is in a known state:
1727 * No teardown, the channel is idle
1728 */
1729 reinit_completion(&uc->teardown_completed);
1730 complete_all(&uc->teardown_completed);
1731 uc->state = UDMA_CHAN_IS_IDLE;
1732
1733 switch (uc->config.dir) {
1734 case DMA_MEM_TO_MEM:
1735 /* Non synchronized - mem to mem type of transfer */
1736 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1737 uc->id);
1738
1739 ret = udma_get_chan_pair(uc);
1740 if (ret)
1741 return ret;
1742
1743 ret = udma_alloc_tx_resources(uc);
1744 if (ret)
1745 return ret;
1746
1747 ret = udma_alloc_rx_resources(uc);
1748 if (ret) {
1749 udma_free_tx_resources(uc);
1750 return ret;
1751 }
1752
1753 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1754 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1755 K3_PSIL_DST_THREAD_ID_OFFSET;
1756
1757 irq_ring = uc->tchan->tc_ring;
1758 irq_udma_idx = uc->tchan->id;
1759
1760 ret = udma_tisci_m2m_channel_config(uc);
1761 break;
1762 case DMA_MEM_TO_DEV:
1763 /* Slave transfer synchronized - mem to dev (TX) transfer */
1764 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1765 uc->id);
1766
1767 ret = udma_alloc_tx_resources(uc);
1768 if (ret) {
1769 uc->config.remote_thread_id = -1;
1770 return ret;
1771 }
1772
1773 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1774 uc->config.dst_thread = uc->config.remote_thread_id;
1775 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1776
1777 irq_ring = uc->tchan->tc_ring;
1778 irq_udma_idx = uc->tchan->id;
1779
1780 ret = udma_tisci_tx_channel_config(uc);
1781 break;
1782 case DMA_DEV_TO_MEM:
1783 /* Slave transfer synchronized - dev to mem (RX) transfer */
1784 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1785 uc->id);
1786
1787 ret = udma_alloc_rx_resources(uc);
1788 if (ret) {
1789 uc->config.remote_thread_id = -1;
1790 return ret;
1791 }
1792
1793 uc->config.src_thread = uc->config.remote_thread_id;
1794 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1795 K3_PSIL_DST_THREAD_ID_OFFSET;
1796
1797 irq_ring = uc->rflow->r_ring;
1798 irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1799
1800 ret = udma_tisci_rx_channel_config(uc);
1801 break;
1802 default:
1803 /* Can not happen */
1804 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1805 __func__, uc->id, uc->config.dir);
1806 return -EINVAL;
1807 }
1808
1809 /* check if the channel configuration was successful */
1810 if (ret)
1811 goto err_res_free;
1812
1813 if (udma_is_chan_running(uc)) {
1814 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1815 udma_stop(uc);
1816 if (udma_is_chan_running(uc)) {
1817 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1818 ret = -EBUSY;
1819 goto err_res_free;
1820 }
1821 }
1822
1823 /* PSI-L pairing */
1824 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1825 if (ret) {
1826 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1827 uc->config.src_thread, uc->config.dst_thread);
1828 goto err_res_free;
1829 }
1830
1831 uc->psil_paired = true;
1832
1833 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1834 if (uc->irq_num_ring <= 0) {
1835 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1836 k3_ringacc_get_ring_id(irq_ring));
1837 ret = -EINVAL;
1838 goto err_psi_free;
1839 }
1840
1841 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1842 IRQF_TRIGGER_HIGH, uc->name, uc);
1843 if (ret) {
1844 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1845 goto err_irq_free;
1846 }
1847
1848 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1849 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1850 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1851 irq_udma_idx);
1852 if (uc->irq_num_udma <= 0) {
1853 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1854 irq_udma_idx);
1855 free_irq(uc->irq_num_ring, uc);
1856 ret = -EINVAL;
1857 goto err_irq_free;
1858 }
1859
1860 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1861 uc->name, uc);
1862 if (ret) {
1863 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1864 uc->id);
1865 free_irq(uc->irq_num_ring, uc);
1866 goto err_irq_free;
1867 }
1868 } else {
1869 uc->irq_num_udma = 0;
1870 }
1871
1872 udma_reset_rings(uc);
1873
1874 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
1875 udma_check_tx_completion);
1876 return 0;
1877
1878err_irq_free:
1879 uc->irq_num_ring = 0;
1880 uc->irq_num_udma = 0;
1881err_psi_free:
1882 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1883 uc->psil_paired = false;
1884err_res_free:
1885 udma_free_tx_resources(uc);
1886 udma_free_rx_resources(uc);
1887
1888 udma_reset_uchan(uc);
1889
1890 if (uc->use_dma_pool) {
1891 dma_pool_destroy(uc->hdesc_pool);
1892 uc->use_dma_pool = false;
1893 }
1894
1895 return ret;
1896}
1897
1898static int udma_slave_config(struct dma_chan *chan,
1899 struct dma_slave_config *cfg)
1900{
1901 struct udma_chan *uc = to_udma_chan(chan);
1902
1903 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1904
1905 return 0;
1906}
1907
1908static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1909 size_t tr_size, int tr_count,
1910 enum dma_transfer_direction dir)
1911{
1912 struct udma_hwdesc *hwdesc;
1913 struct cppi5_desc_hdr_t *tr_desc;
1914 struct udma_desc *d;
1915 u32 reload_count = 0;
1916 u32 ring_id;
1917
1918 switch (tr_size) {
1919 case 16:
1920 case 32:
1921 case 64:
1922 case 128:
1923 break;
1924 default:
1925 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1926 return NULL;
1927 }
1928
1929 /* We have only one descriptor containing multiple TRs */
1930 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1931 if (!d)
1932 return NULL;
1933
1934 d->sglen = tr_count;
1935
1936 d->hwdesc_count = 1;
1937 hwdesc = &d->hwdesc[0];
1938
1939 /* Allocate memory for DMA ring descriptor */
1940 if (uc->use_dma_pool) {
1941 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1942 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1943 GFP_NOWAIT,
1944 &hwdesc->cppi5_desc_paddr);
1945 } else {
1946 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1947 tr_count);
1948 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1949 uc->ud->desc_align);
1950 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1951 hwdesc->cppi5_desc_size,
1952 &hwdesc->cppi5_desc_paddr,
1953 GFP_NOWAIT);
1954 }
1955
1956 if (!hwdesc->cppi5_desc_vaddr) {
1957 kfree(d);
1958 return NULL;
1959 }
1960
1961 /* Start of the TR req records */
1962 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
1963 /* Start address of the TR response array */
1964 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
1965
1966 tr_desc = hwdesc->cppi5_desc_vaddr;
1967
1968 if (uc->cyclic)
1969 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
1970
1971 if (dir == DMA_DEV_TO_MEM)
1972 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1973 else
1974 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
1975
1976 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
1977 cppi5_desc_set_pktids(tr_desc, uc->id,
1978 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
1979 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
1980
1981 return d;
1982}
1983
Peter Ujfalusia9793402020-02-14 11:14:38 +02001984/**
1985 * udma_get_tr_counters - calculate TR counters for a given length
1986 * @len: Length of the transfer
1987 * @align_to: Preferred alignment
1988 * @tr0_cnt0: First TR icnt0
1989 * @tr0_cnt1: First TR icnt1
1990 * @tr1_cnt0: Second (if used) TR icnt0
1991 *
1992 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
1993 * For len >= SZ_64K two TRs are used in a simple way:
1994 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
1995 * Second TR: the remaining length (tr1_cnt0)
1996 *
1997 * Returns the number of TRs the length needs (1 or 2)
1998 * or -EINVAL if the length cannot be supported
1999 */
2000static int udma_get_tr_counters(size_t len, unsigned long align_to,
2001 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2002{
2003 if (len < SZ_64K) {
2004 *tr0_cnt0 = len;
2005 *tr0_cnt1 = 1;
2006
2007 return 1;
2008 }
2009
2010 if (align_to > 3)
2011 align_to = 3;
2012
2013realign:
2014 *tr0_cnt0 = SZ_64K - BIT(align_to);
2015 if (len / *tr0_cnt0 >= SZ_64K) {
2016 if (align_to) {
2017 align_to--;
2018 goto realign;
2019 }
2020 return -EINVAL;
2021 }
2022
2023 *tr0_cnt1 = len / *tr0_cnt0;
2024 *tr1_cnt0 = len % *tr0_cnt0;
2025
2026 return 2;
2027}
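/*
 * Illustrative example (values picked for the arithmetic, not from any
 * hardware documentation): len = 200000 with align_to >= 3 gives
 * tr0_cnt0 = SZ_64K - 8 = 65528, tr0_cnt1 = 3 and tr1_cnt0 = 3416, so the
 * first TR moves 3 x 65528 = 196584 bytes and the second TR moves the
 * remaining 3416 bytes (return value 2).
 */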
2028
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002029static struct udma_desc *
2030udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2031 unsigned int sglen, enum dma_transfer_direction dir,
2032 unsigned long tx_flags, void *context)
2033{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002034 struct scatterlist *sgent;
2035 struct udma_desc *d;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002036 struct cppi5_tr_type1_t *tr_req = NULL;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002037 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002038 unsigned int i;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002039 size_t tr_size;
2040 int num_tr = 0;
2041 int tr_idx = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002042
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002043 if (!is_slave_direction(dir)) {
2044		dev_err(uc->ud->dev, "Only slave DMA transfers are supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002045 return NULL;
2046 }
2047
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002048 /* estimate the number of TRs we will need */
2049 for_each_sg(sgl, sgent, sglen, i) {
2050 if (sg_dma_len(sgent) < SZ_64K)
2051 num_tr++;
2052 else
2053 num_tr += 2;
2054 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002055
2056 /* Now allocate and setup the descriptor. */
2057 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002058 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002059 if (!d)
2060 return NULL;
2061
2062 d->sglen = sglen;
2063
2064 tr_req = d->hwdesc[0].tr_req_base;
2065 for_each_sg(sgl, sgent, sglen, i) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002066 dma_addr_t sg_addr = sg_dma_address(sgent);
2067
2068 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2069 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2070 if (num_tr < 0) {
2071 dev_err(uc->ud->dev, "size %u is not supported\n",
2072 sg_dma_len(sgent));
2073 udma_free_hwdesc(uc, d);
2074 kfree(d);
2075 return NULL;
2076 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002077
2078		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false,
2079			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2080		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2081
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002082 tr_req[tr_idx].addr = sg_addr;
2083 tr_req[tr_idx].icnt0 = tr0_cnt0;
2084 tr_req[tr_idx].icnt1 = tr0_cnt1;
2085 tr_req[tr_idx].dim1 = tr0_cnt0;
2086 tr_idx++;
2087
2088 if (num_tr == 2) {
2089 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2090 false, false,
2091 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2092 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2093 CPPI5_TR_CSF_SUPR_EVT);
2094
2095 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2096 tr_req[tr_idx].icnt0 = tr1_cnt0;
2097 tr_req[tr_idx].icnt1 = 1;
2098 tr_req[tr_idx].dim1 = tr1_cnt0;
2099 tr_idx++;
2100 }
2101
2102 d->residue += sg_dma_len(sgent);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002103 }
2104
Peter Ujfalusibe4054b2020-05-12 16:45:31 +03002105 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2106 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002107
2108 return d;
2109}
2110
2111static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2112 enum dma_slave_buswidth dev_width,
2113 u16 elcnt)
2114{
2115 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2116 return 0;
2117
2118 /* Bus width translates to the element size (ES) */
2119 switch (dev_width) {
2120 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2121 d->static_tr.elsize = 0;
2122 break;
2123 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2124 d->static_tr.elsize = 1;
2125 break;
2126 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2127 d->static_tr.elsize = 2;
2128 break;
2129 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2130 d->static_tr.elsize = 3;
2131 break;
2132 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2133 d->static_tr.elsize = 4;
2134 break;
2135 default: /* not reached */
2136 return -EINVAL;
2137 }
2138
2139 d->static_tr.elcnt = elcnt;
2140
2141 /*
2142	 * PDMA must close the packet when the channel is in packet mode.
2143	 * In TR mode, when the channel is not cyclic, we also need PDMA to close
2144	 * the packet, otherwise the transfer will stall because PDMA holds on to
2145	 * the data it has received from the peripheral.
2146 */
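	/*
	 * Illustrative example (hypothetical values): with a 2 byte wide
	 * peripheral register (dev_width = 2) and elcnt = 8, div = 16; a
	 * non-cyclic transfer with d->residue = 4096 bytes then yields
	 * bstcnt = 256 for the static TR Z counter.
	 */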
2147 if (uc->config.pkt_mode || !uc->cyclic) {
2148 unsigned int div = dev_width * elcnt;
2149
2150 if (uc->cyclic)
2151 d->static_tr.bstcnt = d->residue / d->sglen / div;
2152 else
2153 d->static_tr.bstcnt = d->residue / div;
2154
2155 if (uc->config.dir == DMA_DEV_TO_MEM &&
2156 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2157 return -EINVAL;
2158 } else {
2159 d->static_tr.bstcnt = 0;
2160 }
2161
2162 return 0;
2163}
2164
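/*
 * Summary of udma_prep_slave_sg_pkt() below: one CPPI5 host descriptor is
 * taken from the channel's descriptor pool per SG entry. The first descriptor
 * carries the packet/flow IDs, the return-ring policy and the total packet
 * length; for MEM_TO_DEV the following descriptors are linked to it as host
 * buffer descriptors. The accumulated packet length must stay below the 4M
 * limit checked at the end.
 */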
2165static struct udma_desc *
2166udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2167 unsigned int sglen, enum dma_transfer_direction dir,
2168 unsigned long tx_flags, void *context)
2169{
2170 struct scatterlist *sgent;
2171 struct cppi5_host_desc_t *h_desc = NULL;
2172 struct udma_desc *d;
2173 u32 ring_id;
2174 unsigned int i;
2175
2176 d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2177 if (!d)
2178 return NULL;
2179
2180 d->sglen = sglen;
2181 d->hwdesc_count = sglen;
2182
2183 if (dir == DMA_DEV_TO_MEM)
2184 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2185 else
2186 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2187
2188 for_each_sg(sgl, sgent, sglen, i) {
2189 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2190 dma_addr_t sg_addr = sg_dma_address(sgent);
2191 struct cppi5_host_desc_t *desc;
2192 size_t sg_len = sg_dma_len(sgent);
2193
2194 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2195 GFP_NOWAIT,
2196 &hwdesc->cppi5_desc_paddr);
2197 if (!hwdesc->cppi5_desc_vaddr) {
2198 dev_err(uc->ud->dev,
2199 "descriptor%d allocation failed\n", i);
2200
2201 udma_free_hwdesc(uc, d);
2202 kfree(d);
2203 return NULL;
2204 }
2205
2206 d->residue += sg_len;
2207 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2208 desc = hwdesc->cppi5_desc_vaddr;
2209
2210 if (i == 0) {
2211 cppi5_hdesc_init(desc, 0, 0);
2212			/* Flow and Packet ID */
2213 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2214 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2215 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2216 } else {
2217 cppi5_hdesc_reset_hbdesc(desc);
2218 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2219 }
2220
2221 /* attach the sg buffer to the descriptor */
2222 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2223
2224 /* Attach link as host buffer descriptor */
2225 if (h_desc)
2226 cppi5_hdesc_link_hbdesc(h_desc,
2227 hwdesc->cppi5_desc_paddr);
2228
2229 if (dir == DMA_MEM_TO_DEV)
2230 h_desc = desc;
2231 }
2232
2233 if (d->residue >= SZ_4M) {
2234 dev_err(uc->ud->dev,
2235 "%s: Transfer size %u is over the supported 4M range\n",
2236 __func__, d->residue);
2237 udma_free_hwdesc(uc, d);
2238 kfree(d);
2239 return NULL;
2240 }
2241
2242 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2243 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2244
2245 return d;
2246}
2247
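/*
 * Client metadata support (DESC_METADATA_CLIENT / DESC_METADATA_ENGINE): the
 * metadata lives in the EPIB words and the protocol specific data area of the
 * first host descriptor. attach copies the client buffer in for MEM_TO_DEV
 * and updates the EPIB/PS data bookkeeping, get_ptr exposes the in-descriptor
 * region together with the currently valid length, and set_len updates the
 * EPIB-present flag and PS data size before submission.
 */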
2248static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2249 void *data, size_t len)
2250{
2251 struct udma_desc *d = to_udma_desc(desc);
2252 struct udma_chan *uc = to_udma_chan(desc->chan);
2253 struct cppi5_host_desc_t *h_desc;
2254 u32 psd_size = len;
2255 u32 flags = 0;
2256
2257 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2258 return -ENOTSUPP;
2259
2260 if (!data || len > uc->config.metadata_size)
2261 return -EINVAL;
2262
2263 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2264 return -EINVAL;
2265
2266 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2267 if (d->dir == DMA_MEM_TO_DEV)
2268 memcpy(h_desc->epib, data, len);
2269
2270 if (uc->config.needs_epib)
2271 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2272
2273 d->metadata = data;
2274 d->metadata_size = len;
2275 if (uc->config.needs_epib)
2276 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2277
2278 cppi5_hdesc_update_flags(h_desc, flags);
2279 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2280
2281 return 0;
2282}
2283
2284static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2285 size_t *payload_len, size_t *max_len)
2286{
2287 struct udma_desc *d = to_udma_desc(desc);
2288 struct udma_chan *uc = to_udma_chan(desc->chan);
2289 struct cppi5_host_desc_t *h_desc;
2290
2291 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2292 return ERR_PTR(-ENOTSUPP);
2293
2294 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2295
2296 *max_len = uc->config.metadata_size;
2297
2298 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2299 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2300 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2301
2302 return h_desc->epib;
2303}
2304
2305static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2306 size_t payload_len)
2307{
2308 struct udma_desc *d = to_udma_desc(desc);
2309 struct udma_chan *uc = to_udma_chan(desc->chan);
2310 struct cppi5_host_desc_t *h_desc;
2311 u32 psd_size = payload_len;
2312 u32 flags = 0;
2313
2314 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2315 return -ENOTSUPP;
2316
2317 if (payload_len > uc->config.metadata_size)
2318 return -EINVAL;
2319
2320 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2321 return -EINVAL;
2322
2323 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2324
2325 if (uc->config.needs_epib) {
2326 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2327 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2328 }
2329
2330 cppi5_hdesc_update_flags(h_desc, flags);
2331 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2332
2333 return 0;
2334}
2335
2336static struct dma_descriptor_metadata_ops metadata_ops = {
2337 .attach = udma_attach_metadata,
2338 .get_ptr = udma_get_metadata_ptr,
2339 .set_len = udma_set_metadata_len,
2340};
2341
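/*
 * Illustrative dmaengine client sequence for a slave_sg transfer on this
 * driver (error handling omitted, names and values are hypothetical):
 *
 *	chan = dma_request_chan(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */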
2342static struct dma_async_tx_descriptor *
2343udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2344 unsigned int sglen, enum dma_transfer_direction dir,
2345 unsigned long tx_flags, void *context)
2346{
2347 struct udma_chan *uc = to_udma_chan(chan);
2348 enum dma_slave_buswidth dev_width;
2349 struct udma_desc *d;
2350 u32 burst;
2351
2352 if (dir != uc->config.dir) {
2353 dev_err(chan->device->dev,
2354 "%s: chan%d is for %s, not supporting %s\n",
2355 __func__, uc->id,
2356 dmaengine_get_direction_text(uc->config.dir),
2357 dmaengine_get_direction_text(dir));
2358 return NULL;
2359 }
2360
2361 if (dir == DMA_DEV_TO_MEM) {
2362 dev_width = uc->cfg.src_addr_width;
2363 burst = uc->cfg.src_maxburst;
2364 } else if (dir == DMA_MEM_TO_DEV) {
2365 dev_width = uc->cfg.dst_addr_width;
2366 burst = uc->cfg.dst_maxburst;
2367 } else {
2368 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2369 return NULL;
2370 }
2371
2372 if (!burst)
2373 burst = 1;
2374
2375 if (uc->config.pkt_mode)
2376 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2377 context);
2378 else
2379 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2380 context);
2381
2382 if (!d)
2383 return NULL;
2384
2385 d->dir = dir;
2386 d->desc_idx = 0;
2387 d->tr_idx = 0;
2388
2389 /* static TR for remote PDMA */
2390 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2391 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002392 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002393 __func__, d->static_tr.bstcnt);
2394
2395 udma_free_hwdesc(uc, d);
2396 kfree(d);
2397 return NULL;
2398 }
2399
2400 if (uc->config.metadata_size)
2401 d->vd.tx.metadata_ops = &metadata_ops;
2402
2403 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2404}
2405
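/*
 * TR mode cyclic: each period is described by one type1 TR, or by two TRs
 * when period_len needs the 64K split from udma_get_tr_counters(). The event
 * of the first TR of a split period is suppressed, and the per-period
 * completion event is suppressed as well when the client did not request
 * DMA_PREP_INTERRUPT.
 */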
2406static struct udma_desc *
2407udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2408 size_t buf_len, size_t period_len,
2409 enum dma_transfer_direction dir, unsigned long flags)
2410{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002411 struct udma_desc *d;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002412 size_t tr_size, period_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002413 struct cppi5_tr_type1_t *tr_req;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002414 unsigned int periods = buf_len / period_len;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002415 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2416 unsigned int i;
2417 int num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002418
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002419 if (!is_slave_direction(dir)) {
2420 dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002421 return NULL;
2422 }
2423
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002424 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2425 &tr0_cnt1, &tr1_cnt0);
2426 if (num_tr < 0) {
2427 dev_err(uc->ud->dev, "size %zu is not supported\n",
2428 period_len);
2429 return NULL;
2430 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002431
2432 /* Now allocate and setup the descriptor. */
2433 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002434 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002435 if (!d)
2436 return NULL;
2437
2438 tr_req = d->hwdesc[0].tr_req_base;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002439 period_addr = buf_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002440 for (i = 0; i < periods; i++) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002441 int tr_idx = i * num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002442
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002443 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2444 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2445
2446 tr_req[tr_idx].addr = period_addr;
2447 tr_req[tr_idx].icnt0 = tr0_cnt0;
2448 tr_req[tr_idx].icnt1 = tr0_cnt1;
2449 tr_req[tr_idx].dim1 = tr0_cnt0;
2450
2451 if (num_tr == 2) {
2452 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2453 CPPI5_TR_CSF_SUPR_EVT);
2454 tr_idx++;
2455
2456 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2457 false, false,
2458 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2459
2460 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2461 tr_req[tr_idx].icnt0 = tr1_cnt0;
2462 tr_req[tr_idx].icnt1 = 1;
2463 tr_req[tr_idx].dim1 = tr1_cnt0;
2464 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002465
2466 if (!(flags & DMA_PREP_INTERRUPT))
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002467 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002468 CPPI5_TR_CSF_SUPR_EVT);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002469
2470 period_addr += period_len;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002471 }
2472
2473 return d;
2474}
2475
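/*
 * Packet mode cyclic: one host descriptor is prepared per period, each
 * pointing into the client buffer at its period offset. The period count is
 * bounded by the default ring size and each period must stay below 4M.
 */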
2476static struct udma_desc *
2477udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2478 size_t buf_len, size_t period_len,
2479 enum dma_transfer_direction dir, unsigned long flags)
2480{
2481 struct udma_desc *d;
2482 u32 ring_id;
2483 int i;
2484 int periods = buf_len / period_len;
2485
2486 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2487 return NULL;
2488
2489 if (period_len >= SZ_4M)
2490 return NULL;
2491
2492 d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2493 if (!d)
2494 return NULL;
2495
2496 d->hwdesc_count = periods;
2497
2498 /* TODO: re-check this... */
2499 if (dir == DMA_DEV_TO_MEM)
2500 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2501 else
2502 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2503
2504 for (i = 0; i < periods; i++) {
2505 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2506 dma_addr_t period_addr = buf_addr + (period_len * i);
2507 struct cppi5_host_desc_t *h_desc;
2508
2509 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2510 GFP_NOWAIT,
2511 &hwdesc->cppi5_desc_paddr);
2512 if (!hwdesc->cppi5_desc_vaddr) {
2513 dev_err(uc->ud->dev,
2514 "descriptor%d allocation failed\n", i);
2515
2516 udma_free_hwdesc(uc, d);
2517 kfree(d);
2518 return NULL;
2519 }
2520
2521 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2522 h_desc = hwdesc->cppi5_desc_vaddr;
2523
2524 cppi5_hdesc_init(h_desc, 0, 0);
2525 cppi5_hdesc_set_pktlen(h_desc, period_len);
2526
2527		/* Flow and Packet ID */
2528 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2529 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2530 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2531
2532 /* attach each period to a new descriptor */
2533 cppi5_hdesc_attach_buf(h_desc,
2534 period_addr, period_len,
2535 period_addr, period_len);
2536 }
2537
2538 return d;
2539}
2540
2541static struct dma_async_tx_descriptor *
2542udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2543 size_t period_len, enum dma_transfer_direction dir,
2544 unsigned long flags)
2545{
2546 struct udma_chan *uc = to_udma_chan(chan);
2547 enum dma_slave_buswidth dev_width;
2548 struct udma_desc *d;
2549 u32 burst;
2550
2551 if (dir != uc->config.dir) {
2552 dev_err(chan->device->dev,
2553 "%s: chan%d is for %s, not supporting %s\n",
2554 __func__, uc->id,
2555 dmaengine_get_direction_text(uc->config.dir),
2556 dmaengine_get_direction_text(dir));
2557 return NULL;
2558 }
2559
2560 uc->cyclic = true;
2561
2562 if (dir == DMA_DEV_TO_MEM) {
2563 dev_width = uc->cfg.src_addr_width;
2564 burst = uc->cfg.src_maxburst;
2565 } else if (dir == DMA_MEM_TO_DEV) {
2566 dev_width = uc->cfg.dst_addr_width;
2567 burst = uc->cfg.dst_maxburst;
2568 } else {
2569 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2570 return NULL;
2571 }
2572
2573 if (!burst)
2574 burst = 1;
2575
2576 if (uc->config.pkt_mode)
2577 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2578 dir, flags);
2579 else
2580 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2581 dir, flags);
2582
2583 if (!d)
2584 return NULL;
2585
2586 d->sglen = buf_len / period_len;
2587
2588 d->dir = dir;
2589 d->residue = buf_len;
2590
2591 /* static TR for remote PDMA */
2592 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2593 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002594 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002595 __func__, d->static_tr.bstcnt);
2596
2597 udma_free_hwdesc(uc, d);
2598 kfree(d);
2599 return NULL;
2600 }
2601
2602 if (uc->config.metadata_size)
2603 d->vd.tx.metadata_ops = &metadata_ops;
2604
2605 return vchan_tx_prep(&uc->vc, &d->vd, flags);
2606}
2607
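/*
 * MEM_TO_MEM transfers use one or two type15 TRs; the split and the
 * icnt0/icnt1 counters come from udma_get_tr_counters() based on the common
 * alignment of source and destination (__ffs(src | dest)). The last TR is
 * flagged as end of packet (CPPI5_TR_CSF_EOP).
 */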
2608static struct dma_async_tx_descriptor *
2609udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2610 size_t len, unsigned long tx_flags)
2611{
2612 struct udma_chan *uc = to_udma_chan(chan);
2613 struct udma_desc *d;
2614 struct cppi5_tr_type15_t *tr_req;
2615 int num_tr;
2616 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2617 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2618
2619 if (uc->config.dir != DMA_MEM_TO_MEM) {
2620 dev_err(chan->device->dev,
2621 "%s: chan%d is for %s, not supporting %s\n",
2622 __func__, uc->id,
2623 dmaengine_get_direction_text(uc->config.dir),
2624 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2625 return NULL;
2626 }
2627
Peter Ujfalusia9793402020-02-14 11:14:38 +02002628 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2629 &tr0_cnt1, &tr1_cnt0);
2630 if (num_tr < 0) {
2631 dev_err(uc->ud->dev, "size %zu is not supported\n",
2632 len);
2633 return NULL;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002634 }
2635
2636 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2637 if (!d)
2638 return NULL;
2639
2640 d->dir = DMA_MEM_TO_MEM;
2641 d->desc_idx = 0;
2642 d->tr_idx = 0;
2643 d->residue = len;
2644
2645 tr_req = d->hwdesc[0].tr_req_base;
2646
2647 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2648 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2649 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2650
2651 tr_req[0].addr = src;
2652 tr_req[0].icnt0 = tr0_cnt0;
2653 tr_req[0].icnt1 = tr0_cnt1;
2654 tr_req[0].icnt2 = 1;
2655 tr_req[0].icnt3 = 1;
2656 tr_req[0].dim1 = tr0_cnt0;
2657
2658 tr_req[0].daddr = dest;
2659 tr_req[0].dicnt0 = tr0_cnt0;
2660 tr_req[0].dicnt1 = tr0_cnt1;
2661 tr_req[0].dicnt2 = 1;
2662 tr_req[0].dicnt3 = 1;
2663 tr_req[0].ddim1 = tr0_cnt0;
2664
2665 if (num_tr == 2) {
2666 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2667 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2668 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2669
2670 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2671 tr_req[1].icnt0 = tr1_cnt0;
2672 tr_req[1].icnt1 = 1;
2673 tr_req[1].icnt2 = 1;
2674 tr_req[1].icnt3 = 1;
2675
2676 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2677 tr_req[1].dicnt0 = tr1_cnt0;
2678 tr_req[1].dicnt1 = 1;
2679 tr_req[1].dicnt2 = 1;
2680 tr_req[1].dicnt3 = 1;
2681 }
2682
Peter Ujfalusibe4054b2020-05-12 16:45:31 +03002683 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
2684 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002685
2686 if (uc->config.metadata_size)
2687 d->vd.tx.metadata_ops = &metadata_ops;
2688
2689 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2690}
2691
2692static void udma_issue_pending(struct dma_chan *chan)
2693{
2694 struct udma_chan *uc = to_udma_chan(chan);
2695 unsigned long flags;
2696
2697 spin_lock_irqsave(&uc->vc.lock, flags);
2698
2699 /* If we have something pending and no active descriptor, then */
2700 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2701 /*
2702		 * start a descriptor, unless the channel is marked as
2703		 * terminating and is still running (i.e. its teardown has not
2704		 * completed yet).
2705 */
2706 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2707 udma_is_chan_running(uc)))
2708 udma_start(uc);
2709 }
2710
2711 spin_unlock_irqrestore(&uc->vc.lock, flags);
2712}
2713
2714static enum dma_status udma_tx_status(struct dma_chan *chan,
2715 dma_cookie_t cookie,
2716 struct dma_tx_state *txstate)
2717{
2718 struct udma_chan *uc = to_udma_chan(chan);
2719 enum dma_status ret;
2720 unsigned long flags;
2721
2722 spin_lock_irqsave(&uc->vc.lock, flags);
2723
2724 ret = dma_cookie_status(chan, cookie, txstate);
2725
Peter Ujfalusi83903182020-02-14 11:14:41 +02002726 if (!udma_is_chan_running(uc))
2727 ret = DMA_COMPLETE;
2728
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002729 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2730 ret = DMA_PAUSED;
2731
2732 if (ret == DMA_COMPLETE || !txstate)
2733 goto out;
2734
2735 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2736 u32 peer_bcnt = 0;
2737 u32 bcnt = 0;
2738 u32 residue = uc->desc->residue;
2739 u32 delay = 0;
2740
2741 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2742 bcnt = udma_tchanrt_read(uc->tchan,
2743 UDMA_TCHAN_RT_SBCNT_REG);
2744
2745 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2746 peer_bcnt = udma_tchanrt_read(uc->tchan,
2747 UDMA_TCHAN_RT_PEER_BCNT_REG);
2748
2749 if (bcnt > peer_bcnt)
2750 delay = bcnt - peer_bcnt;
2751 }
2752 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2753 bcnt = udma_rchanrt_read(uc->rchan,
2754 UDMA_RCHAN_RT_BCNT_REG);
2755
2756 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2757 peer_bcnt = udma_rchanrt_read(uc->rchan,
2758 UDMA_RCHAN_RT_PEER_BCNT_REG);
2759
2760 if (peer_bcnt > bcnt)
2761 delay = peer_bcnt - bcnt;
2762 }
2763 } else {
2764 bcnt = udma_tchanrt_read(uc->tchan,
2765 UDMA_TCHAN_RT_BCNT_REG);
2766 }
2767
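		/*
		 * The RT byte counters accumulate over the life of the
		 * channel; subtract what has already been accounted for and
		 * fold the progress modulo the descriptor size so the
		 * reported residue stays within the current descriptor.
		 */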
2768 bcnt -= uc->bcnt;
2769 if (bcnt && !(bcnt % uc->desc->residue))
2770 residue = 0;
2771 else
2772 residue -= bcnt % uc->desc->residue;
2773
2774 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2775 ret = DMA_COMPLETE;
2776 delay = 0;
2777 }
2778
2779 dma_set_residue(txstate, residue);
2780 dma_set_in_flight_bytes(txstate, delay);
2781
2782 } else {
2783 ret = DMA_COMPLETE;
2784 }
2785
2786out:
2787 spin_unlock_irqrestore(&uc->vc.lock, flags);
2788 return ret;
2789}
2790
2791static int udma_pause(struct dma_chan *chan)
2792{
2793 struct udma_chan *uc = to_udma_chan(chan);
2794
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002795 /* pause the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002796 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002797 case DMA_DEV_TO_MEM:
2798 udma_rchanrt_update_bits(uc->rchan,
2799 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2800 UDMA_PEER_RT_EN_PAUSE,
2801 UDMA_PEER_RT_EN_PAUSE);
2802 break;
2803 case DMA_MEM_TO_DEV:
2804 udma_tchanrt_update_bits(uc->tchan,
2805 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2806 UDMA_PEER_RT_EN_PAUSE,
2807 UDMA_PEER_RT_EN_PAUSE);
2808 break;
2809 case DMA_MEM_TO_MEM:
2810 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2811 UDMA_CHAN_RT_CTL_PAUSE,
2812 UDMA_CHAN_RT_CTL_PAUSE);
2813 break;
2814 default:
2815 return -EINVAL;
2816 }
2817
2818 return 0;
2819}
2820
2821static int udma_resume(struct dma_chan *chan)
2822{
2823 struct udma_chan *uc = to_udma_chan(chan);
2824
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002825 /* resume the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002826 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002827 case DMA_DEV_TO_MEM:
2828 udma_rchanrt_update_bits(uc->rchan,
2829 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2830 UDMA_PEER_RT_EN_PAUSE, 0);
2831
2832 break;
2833 case DMA_MEM_TO_DEV:
2834 udma_tchanrt_update_bits(uc->tchan,
2835 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2836 UDMA_PEER_RT_EN_PAUSE, 0);
2837 break;
2838 case DMA_MEM_TO_MEM:
2839 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2840 UDMA_CHAN_RT_CTL_PAUSE, 0);
2841 break;
2842 default:
2843 return -EINVAL;
2844 }
2845
2846 return 0;
2847}
2848
2849static int udma_terminate_all(struct dma_chan *chan)
2850{
2851 struct udma_chan *uc = to_udma_chan(chan);
2852 unsigned long flags;
2853 LIST_HEAD(head);
2854
2855 spin_lock_irqsave(&uc->vc.lock, flags);
2856
2857 if (udma_is_chan_running(uc))
2858 udma_stop(uc);
2859
2860 if (uc->desc) {
2861 uc->terminated_desc = uc->desc;
2862 uc->desc = NULL;
2863 uc->terminated_desc->terminated = true;
2864 cancel_delayed_work(&uc->tx_drain.work);
2865 }
2866
2867 uc->paused = false;
2868
2869 vchan_get_all_descriptors(&uc->vc, &head);
2870 spin_unlock_irqrestore(&uc->vc.lock, flags);
2871 vchan_dma_desc_free_list(&uc->vc, &head);
2872
2873 return 0;
2874}
2875
2876static void udma_synchronize(struct dma_chan *chan)
2877{
2878 struct udma_chan *uc = to_udma_chan(chan);
2879 unsigned long timeout = msecs_to_jiffies(1000);
2880
2881 vchan_synchronize(&uc->vc);
2882
2883 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2884 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2885 timeout);
2886 if (!timeout) {
2887 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2888 uc->id);
2889 udma_dump_chan_stdata(uc);
2890 udma_reset_chan(uc, true);
2891 }
2892 }
2893
2894 udma_reset_chan(uc, false);
2895 if (udma_is_chan_running(uc))
2896 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2897
2898 cancel_delayed_work_sync(&uc->tx_drain.work);
2899 udma_reset_rings(uc);
2900}
2901
2902static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2903 struct virt_dma_desc *vd,
2904 struct dmaengine_result *result)
2905{
2906 struct udma_chan *uc = to_udma_chan(&vc->chan);
2907 struct udma_desc *d;
2908
2909 if (!vd)
2910 return;
2911
2912 d = to_udma_desc(&vd->tx);
2913
2914 if (d->metadata_size)
2915 udma_fetch_epib(uc, d);
2916
2917 /* Provide residue information for the client */
2918 if (result) {
2919 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2920
2921 if (cppi5_desc_get_type(desc_vaddr) ==
2922 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2923 result->residue = d->residue -
2924 cppi5_hdesc_get_pktlen(desc_vaddr);
2925 if (result->residue)
2926 result->result = DMA_TRANS_ABORTED;
2927 else
2928 result->result = DMA_TRANS_NOERROR;
2929 } else {
2930 result->residue = 0;
2931 result->result = DMA_TRANS_NOERROR;
2932 }
2933 }
2934}
2935
2936/*
2937 * This tasklet handles the completion of a DMA descriptor by
2938 * calling its callback and freeing it.
2939 */
2940static void udma_vchan_complete(unsigned long arg)
2941{
2942 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2943 struct virt_dma_desc *vd, *_vd;
2944 struct dmaengine_desc_callback cb;
2945 LIST_HEAD(head);
2946
2947 spin_lock_irq(&vc->lock);
2948 list_splice_tail_init(&vc->desc_completed, &head);
2949 vd = vc->cyclic;
2950 if (vd) {
2951 vc->cyclic = NULL;
2952 dmaengine_desc_get_callback(&vd->tx, &cb);
2953 } else {
2954 memset(&cb, 0, sizeof(cb));
2955 }
2956 spin_unlock_irq(&vc->lock);
2957
2958 udma_desc_pre_callback(vc, vd, NULL);
2959 dmaengine_desc_callback_invoke(&cb, NULL);
2960
2961 list_for_each_entry_safe(vd, _vd, &head, node) {
2962 struct dmaengine_result result;
2963
2964 dmaengine_desc_get_callback(&vd->tx, &cb);
2965
2966 list_del(&vd->node);
2967
2968 udma_desc_pre_callback(vc, vd, &result);
2969 dmaengine_desc_callback_invoke(&cb, &result);
2970
2971 vchan_vdesc_fini(vd);
2972 }
2973}
2974
2975static void udma_free_chan_resources(struct dma_chan *chan)
2976{
2977 struct udma_chan *uc = to_udma_chan(chan);
2978 struct udma_dev *ud = to_udma_dev(chan->device);
2979
2980 udma_terminate_all(chan);
2981 if (uc->terminated_desc) {
2982 udma_reset_chan(uc, false);
2983 udma_reset_rings(uc);
2984 }
2985
2986 cancel_delayed_work_sync(&uc->tx_drain.work);
2987 destroy_delayed_work_on_stack(&uc->tx_drain.work);
2988
2989 if (uc->irq_num_ring > 0) {
2990 free_irq(uc->irq_num_ring, uc);
2991
2992 uc->irq_num_ring = 0;
2993 }
2994 if (uc->irq_num_udma > 0) {
2995 free_irq(uc->irq_num_udma, uc);
2996
2997 uc->irq_num_udma = 0;
2998 }
2999
3000 /* Release PSI-L pairing */
3001 if (uc->psil_paired) {
3002 navss_psil_unpair(ud, uc->config.src_thread,
3003 uc->config.dst_thread);
3004 uc->psil_paired = false;
3005 }
3006
3007 vchan_free_chan_resources(&uc->vc);
3008 tasklet_kill(&uc->vc.task);
3009
3010 udma_free_tx_resources(uc);
3011 udma_free_rx_resources(uc);
3012 udma_reset_uchan(uc);
3013
3014 if (uc->use_dma_pool) {
3015 dma_pool_destroy(uc->hdesc_pool);
3016 uc->use_dma_pool = false;
3017 }
3018}
3019
3020static struct platform_driver udma_driver;
3021
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003022struct udma_filter_param {
3023 int remote_thread_id;
3024 u32 atype;
3025};
3026
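/*
 * Filter function used from udma_of_xlate(): it only accepts channels of
 * this driver, validates the requested atype, derives the direction from the
 * destination-thread bit of the remote PSI-L thread ID and copies the
 * endpoint configuration (packet mode, TPL, EPIB/PS data sizes, PDMA
 * features) from the PSI-L endpoint database into the channel configuration.
 */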
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003027static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
3028{
3029 struct udma_chan_config *ucc;
3030 struct psil_endpoint_config *ep_config;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003031 struct udma_filter_param *filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003032 struct udma_chan *uc;
3033 struct udma_dev *ud;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003034
3035 if (chan->device->dev->driver != &udma_driver.driver)
3036 return false;
3037
3038 uc = to_udma_chan(chan);
3039 ucc = &uc->config;
3040 ud = uc->ud;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003041 filter_param = param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003042
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003043 if (filter_param->atype > 2) {
3044 dev_err(ud->dev, "Invalid channel atype: %u\n",
3045 filter_param->atype);
3046 return false;
3047 }
3048
3049 ucc->remote_thread_id = filter_param->remote_thread_id;
3050 ucc->atype = filter_param->atype;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003051
3052 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3053 ucc->dir = DMA_MEM_TO_DEV;
3054 else
3055 ucc->dir = DMA_DEV_TO_MEM;
3056
3057 ep_config = psil_get_ep_config(ucc->remote_thread_id);
3058 if (IS_ERR(ep_config)) {
3059 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3060 ucc->remote_thread_id);
3061 ucc->dir = DMA_MEM_TO_MEM;
3062 ucc->remote_thread_id = -1;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003063 ucc->atype = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003064 return false;
3065 }
3066
3067 ucc->pkt_mode = ep_config->pkt_mode;
3068 ucc->channel_tpl = ep_config->channel_tpl;
3069 ucc->notdpkt = ep_config->notdpkt;
3070 ucc->ep_type = ep_config->ep_type;
3071
3072 if (ucc->ep_type != PSIL_EP_NATIVE) {
3073 const struct udma_match_data *match_data = ud->match_data;
3074
3075 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3076 ucc->enable_acc32 = ep_config->pdma_acc32;
3077 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3078 ucc->enable_burst = ep_config->pdma_burst;
3079 }
3080
3081 ucc->needs_epib = ep_config->needs_epib;
3082 ucc->psd_size = ep_config->psd_size;
3083 ucc->metadata_size =
3084 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3085 ucc->psd_size;
3086
3087 if (ucc->pkt_mode)
3088 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3089 ucc->metadata_size, ud->desc_align);
3090
3091 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3092 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3093
3094 return true;
3095}
3096
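/*
 * The DMA specifier has one or two cells: args[0] is the remote PSI-L thread
 * ID, the optional args[1] is the atype. An illustrative consumer node (the
 * thread ID values are hypothetical, not taken from a real board):
 *
 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *	dma-names = "tx", "rx";
 */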
3097static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3098 struct of_dma *ofdma)
3099{
3100 struct udma_dev *ud = ofdma->of_dma_data;
3101 dma_cap_mask_t mask = ud->ddev.cap_mask;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003102 struct udma_filter_param filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003103 struct dma_chan *chan;
3104
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003105 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003106 return NULL;
3107
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003108 filter_param.remote_thread_id = dma_spec->args[0];
3109 if (dma_spec->args_count == 2)
3110 filter_param.atype = dma_spec->args[1];
3111 else
3112 filter_param.atype = 0;
3113
3114 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3115 ofdma->of_node);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003116 if (!chan) {
3117		dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
3118 return ERR_PTR(-EINVAL);
3119 }
3120
3121 return chan;
3122}
3123
3124static struct udma_match_data am654_main_data = {
3125 .psil_base = 0x1000,
3126 .enable_memcpy_support = true,
3127 .statictr_z_mask = GENMASK(11, 0),
3128 .rchan_oes_offset = 0x2000,
3129 .tpl_levels = 2,
3130 .level_start_idx = {
3131 [0] = 8, /* Normal channels */
3132 [1] = 0, /* High Throughput channels */
3133 },
3134};
3135
3136static struct udma_match_data am654_mcu_data = {
3137 .psil_base = 0x6000,
Peter Ujfalusia4e68852020-03-27 16:42:28 +02003138 .enable_memcpy_support = false,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003139 .statictr_z_mask = GENMASK(11, 0),
3140 .rchan_oes_offset = 0x2000,
3141 .tpl_levels = 2,
3142 .level_start_idx = {
3143 [0] = 2, /* Normal channels */
3144 [1] = 0, /* High Throughput channels */
3145 },
3146};
3147
3148static struct udma_match_data j721e_main_data = {
3149 .psil_base = 0x1000,
3150 .enable_memcpy_support = true,
3151 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3152 .statictr_z_mask = GENMASK(23, 0),
3153 .rchan_oes_offset = 0x400,
3154 .tpl_levels = 3,
3155 .level_start_idx = {
3156 [0] = 16, /* Normal channels */
3157 [1] = 4, /* High Throughput channels */
3158 [2] = 0, /* Ultra High Throughput channels */
3159 },
3160};
3161
3162static struct udma_match_data j721e_mcu_data = {
3163 .psil_base = 0x6000,
3164 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3165 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3166 .statictr_z_mask = GENMASK(23, 0),
3167 .rchan_oes_offset = 0x400,
3168 .tpl_levels = 2,
3169 .level_start_idx = {
3170 [0] = 2, /* Normal channels */
3171 [1] = 0, /* High Throughput channels */
3172 },
3173};
3174
3175static const struct of_device_id udma_of_match[] = {
3176 {
3177 .compatible = "ti,am654-navss-main-udmap",
3178 .data = &am654_main_data,
3179 },
3180 {
3181 .compatible = "ti,am654-navss-mcu-udmap",
3182 .data = &am654_mcu_data,
3183 }, {
3184 .compatible = "ti,j721e-navss-main-udmap",
3185 .data = &j721e_main_data,
3186 }, {
3187 .compatible = "ti,j721e-navss-mcu-udmap",
3188 .data = &j721e_mcu_data,
3189 },
3190 { /* Sentinel */ },
3191};
3192
3193static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3194{
3195 struct resource *res;
3196 int i;
3197
3198 for (i = 0; i < MMR_LAST; i++) {
3199 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3200 mmr_names[i]);
3201 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3202 if (IS_ERR(ud->mmrs[i]))
3203 return PTR_ERR(ud->mmrs[i]);
3204 }
3205
3206 return 0;
3207}
3208
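/*
 * Resource setup: channel and flow counts are read from the GCFG CAP2/CAP3
 * registers, the usable ranges come from TISCI ("ti,sci-rm-range-*"), rflows
 * with the same id as an rchan are reserved as default flows, and one block
 * of MSI interrupts is allocated covering the tchan range and the rchan
 * range shifted by rchan_oes_offset.
 */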
3209static int udma_setup_resources(struct udma_dev *ud)
3210{
3211 struct device *dev = ud->dev;
3212 int ch_count, ret, i, j;
3213 u32 cap2, cap3;
3214 struct ti_sci_resource_desc *rm_desc;
3215 struct ti_sci_resource *rm_res, irq_res;
3216 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3217 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3218 "ti,sci-rm-range-rchan",
3219 "ti,sci-rm-range-rflow" };
3220
3221 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
3222 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
3223
3224 ud->rflow_cnt = cap3 & 0x3fff;
3225 ud->tchan_cnt = cap2 & 0x1ff;
3226 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
3227 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
3228 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3229
3230 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3231 sizeof(unsigned long), GFP_KERNEL);
3232 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3233 GFP_KERNEL);
3234 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3235 sizeof(unsigned long), GFP_KERNEL);
3236 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3237 GFP_KERNEL);
3238 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3239 sizeof(unsigned long),
3240 GFP_KERNEL);
3241 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3242 BITS_TO_LONGS(ud->rflow_cnt),
3243 sizeof(unsigned long),
3244 GFP_KERNEL);
3245 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3246 sizeof(unsigned long),
3247 GFP_KERNEL);
3248 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3249 GFP_KERNEL);
3250
3251 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3252 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3253 !ud->rflows || !ud->rflow_in_use)
3254 return -ENOMEM;
3255
3256 /*
3257 * RX flows with the same Ids as RX channels are reserved to be used
3258 * as default flows if remote HW can't generate flow_ids. Those
3259 * RX flows can be requested only explicitly by id.
3260 */
3261 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3262
3263 /* by default no GP rflows are assigned to Linux */
3264 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3265
3266 /* Get resource ranges from tisci */
3267 for (i = 0; i < RM_RANGE_LAST; i++)
3268 tisci_rm->rm_ranges[i] =
3269 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3270 tisci_rm->tisci_dev_id,
3271 (char *)range_names[i]);
3272
3273 /* tchan ranges */
3274 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3275 if (IS_ERR(rm_res)) {
3276 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3277 } else {
3278 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3279 for (i = 0; i < rm_res->sets; i++) {
3280 rm_desc = &rm_res->desc[i];
3281 bitmap_clear(ud->tchan_map, rm_desc->start,
3282 rm_desc->num);
3283 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3284 rm_desc->start, rm_desc->num);
3285 }
3286 }
3287 irq_res.sets = rm_res->sets;
3288
3289 /* rchan and matching default flow ranges */
3290 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3291 if (IS_ERR(rm_res)) {
3292 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3293 } else {
3294 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3295 for (i = 0; i < rm_res->sets; i++) {
3296 rm_desc = &rm_res->desc[i];
3297 bitmap_clear(ud->rchan_map, rm_desc->start,
3298 rm_desc->num);
3299 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3300 rm_desc->start, rm_desc->num);
3301 }
3302 }
3303
3304 irq_res.sets += rm_res->sets;
3305 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3306 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3307 for (i = 0; i < rm_res->sets; i++) {
3308 irq_res.desc[i].start = rm_res->desc[i].start;
3309 irq_res.desc[i].num = rm_res->desc[i].num;
3310 }
3311 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3312 for (j = 0; j < rm_res->sets; j++, i++) {
3313 irq_res.desc[i].start = rm_res->desc[j].start +
3314 ud->match_data->rchan_oes_offset;
3315 irq_res.desc[i].num = rm_res->desc[j].num;
3316 }
3317 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3318 kfree(irq_res.desc);
3319 if (ret) {
3320 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3321 return ret;
3322 }
3323
3324 /* GP rflow ranges */
3325 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3326 if (IS_ERR(rm_res)) {
3327 /* all gp flows are assigned exclusively to Linux */
3328 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3329 ud->rflow_cnt - ud->rchan_cnt);
3330 } else {
3331 for (i = 0; i < rm_res->sets; i++) {
3332 rm_desc = &rm_res->desc[i];
3333 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3334 rm_desc->num);
3335 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3336 rm_desc->start, rm_desc->num);
3337 }
3338 }
3339
3340 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3341 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3342 if (!ch_count)
3343 return -ENODEV;
3344
3345 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3346 GFP_KERNEL);
3347 if (!ud->channels)
3348 return -ENOMEM;
3349
3350 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3351 ch_count,
3352 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3353 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3354 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3355 ud->rflow_cnt));
3356
3357 return ch_count;
3358}
3359
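/*
 * RX flush setup: two ready-made descriptors (one TR mode, one packet mode)
 * pointing at a 1K dummy buffer are prepared and synced once at probe time.
 * They are used to drain data that may still arrive while an RX channel is
 * being torn down.
 */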
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003360static int udma_setup_rx_flush(struct udma_dev *ud)
3361{
3362 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3363 struct cppi5_desc_hdr_t *tr_desc;
3364 struct cppi5_tr_type1_t *tr_req;
3365 struct cppi5_host_desc_t *desc;
3366 struct device *dev = ud->dev;
3367 struct udma_hwdesc *hwdesc;
3368 size_t tr_size;
3369
3370 /* Allocate 1K buffer for discarded data on RX channel teardown */
3371 rx_flush->buffer_size = SZ_1K;
3372 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3373 GFP_KERNEL);
3374 if (!rx_flush->buffer_vaddr)
3375 return -ENOMEM;
3376
3377 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3378 rx_flush->buffer_size,
3379 DMA_TO_DEVICE);
3380 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3381 return -ENOMEM;
3382
3383 /* Set up descriptor to be used for TR mode */
3384 hwdesc = &rx_flush->hwdescs[0];
3385 tr_size = sizeof(struct cppi5_tr_type1_t);
3386 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3387 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3388 ud->desc_align);
3389
3390 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3391 GFP_KERNEL);
3392 if (!hwdesc->cppi5_desc_vaddr)
3393 return -ENOMEM;
3394
3395 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3396 hwdesc->cppi5_desc_size,
3397 DMA_TO_DEVICE);
3398 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3399 return -ENOMEM;
3400
3401 /* Start of the TR req records */
3402 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3403 /* Start address of the TR response array */
3404 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3405
3406 tr_desc = hwdesc->cppi5_desc_vaddr;
3407 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3408 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3409 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3410
3411 tr_req = hwdesc->tr_req_base;
3412 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3413 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3414 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3415
3416 tr_req->addr = rx_flush->buffer_paddr;
3417 tr_req->icnt0 = rx_flush->buffer_size;
3418 tr_req->icnt1 = 1;
3419
Peter Ujfalusi5bbeea32020-05-12 16:45:44 +03003420 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3421 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3422
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003423 /* Set up descriptor to be used for packet mode */
3424 hwdesc = &rx_flush->hwdescs[1];
3425 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3426 CPPI5_INFO0_HDESC_EPIB_SIZE +
3427 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3428 ud->desc_align);
3429
3430 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3431 GFP_KERNEL);
3432 if (!hwdesc->cppi5_desc_vaddr)
3433 return -ENOMEM;
3434
3435 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3436 hwdesc->cppi5_desc_size,
3437 DMA_TO_DEVICE);
3438 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3439 return -ENOMEM;
3440
3441 desc = hwdesc->cppi5_desc_vaddr;
3442 cppi5_hdesc_init(desc, 0, 0);
3443 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3444 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3445
3446 cppi5_hdesc_attach_buf(desc,
3447 rx_flush->buffer_paddr, rx_flush->buffer_size,
3448 rx_flush->buffer_paddr, rx_flush->buffer_size);
3449
3450 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3451 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3452 return 0;
3453}
3454
Peter Ujfalusidb8d9b42020-03-06 16:28:38 +02003455#ifdef CONFIG_DEBUG_FS
3456static void udma_dbg_summary_show_chan(struct seq_file *s,
3457 struct dma_chan *chan)
3458{
3459 struct udma_chan *uc = to_udma_chan(chan);
3460 struct udma_chan_config *ucc = &uc->config;
3461
3462 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
3463 chan->dbg_client_name ?: "in-use");
3464 seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
3465
3466 switch (uc->config.dir) {
3467 case DMA_MEM_TO_MEM:
3468 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
3469 ucc->src_thread, ucc->dst_thread);
3470 break;
3471 case DMA_DEV_TO_MEM:
3472 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
3473 ucc->src_thread, ucc->dst_thread);
3474 break;
3475 case DMA_MEM_TO_DEV:
3476 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
3477 ucc->src_thread, ucc->dst_thread);
3478 break;
3479 default:
3480 seq_printf(s, ")\n");
3481 return;
3482 }
3483
3484 if (ucc->ep_type == PSIL_EP_NATIVE) {
3485 seq_printf(s, "PSI-L Native");
3486 if (ucc->metadata_size) {
3487 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
3488 if (ucc->psd_size)
3489 seq_printf(s, " PSDsize:%u", ucc->psd_size);
3490 seq_printf(s, " ]");
3491 }
3492 } else {
3493 seq_printf(s, "PDMA");
3494 if (ucc->enable_acc32 || ucc->enable_burst)
3495 seq_printf(s, "[%s%s ]",
3496 ucc->enable_acc32 ? " ACC32" : "",
3497 ucc->enable_burst ? " BURST" : "");
3498 }
3499
3500 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
3501}
3502
3503static void udma_dbg_summary_show(struct seq_file *s,
3504 struct dma_device *dma_dev)
3505{
3506 struct dma_chan *chan;
3507
3508 list_for_each_entry(chan, &dma_dev->channels, device_node) {
3509 if (chan->client_count)
3510 udma_dbg_summary_show_chan(s, chan);
3511 }
3512}
3513#endif /* CONFIG_DEBUG_FS */
3514
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003515#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3516 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3517 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3518 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3519 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3520
3521static int udma_probe(struct platform_device *pdev)
3522{
3523 struct device_node *navss_node = pdev->dev.parent->of_node;
3524 struct device *dev = &pdev->dev;
3525 struct udma_dev *ud;
3526 const struct of_device_id *match;
3527 int i, ret;
3528 int ch_count;
3529
3530 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3531 if (ret)
3532		dev_err(dev, "failed to set DMA mask\n");
3533
3534 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3535 if (!ud)
3536 return -ENOMEM;
3537
3538 ret = udma_get_mmrs(pdev, ud);
3539 if (ret)
3540 return ret;
3541
3542 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3543 if (IS_ERR(ud->tisci_rm.tisci))
3544 return PTR_ERR(ud->tisci_rm.tisci);
3545
3546 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3547 &ud->tisci_rm.tisci_dev_id);
3548 if (ret) {
3549 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3550 return ret;
3551 }
3552 pdev->id = ud->tisci_rm.tisci_dev_id;
3553
3554 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3555 &ud->tisci_rm.tisci_navss_dev_id);
3556 if (ret) {
3557 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3558 return ret;
3559 }
3560
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003561 ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
3562 if (!ret && ud->atype > 2) {
3563 dev_err(dev, "Invalid atype: %u\n", ud->atype);
3564 return -EINVAL;
3565 }
3566
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003567 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3568 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3569
3570 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3571 if (IS_ERR(ud->ringacc))
3572 return PTR_ERR(ud->ringacc);
3573
3574 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3575 DOMAIN_BUS_TI_SCI_INTA_MSI);
3576 if (!dev->msi_domain) {
3577 dev_err(dev, "Failed to get MSI domain\n");
3578 return -EPROBE_DEFER;
3579 }
3580
3581 match = of_match_node(udma_of_match, dev->of_node);
3582 if (!match) {
3583 dev_err(dev, "No compatible match found\n");
3584 return -ENODEV;
3585 }
3586 ud->match_data = match->data;
3587
3588 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3589 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3590
3591 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3592 ud->ddev.device_config = udma_slave_config;
3593 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3594 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3595 ud->ddev.device_issue_pending = udma_issue_pending;
3596 ud->ddev.device_tx_status = udma_tx_status;
3597 ud->ddev.device_pause = udma_pause;
3598 ud->ddev.device_resume = udma_resume;
3599 ud->ddev.device_terminate_all = udma_terminate_all;
3600 ud->ddev.device_synchronize = udma_synchronize;
Peter Ujfalusidb8d9b42020-03-06 16:28:38 +02003601#ifdef CONFIG_DEBUG_FS
3602 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
3603#endif
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003604
3605 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3606 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3607 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3608 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3609 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3610 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3611 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3612 DESC_METADATA_ENGINE;
3613 if (ud->match_data->enable_memcpy_support) {
3614 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3615 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3616 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3617 }
3618
3619 ud->ddev.dev = dev;
3620 ud->dev = dev;
3621 ud->psil_base = ud->match_data->psil_base;
3622
3623 INIT_LIST_HEAD(&ud->ddev.channels);
3624 INIT_LIST_HEAD(&ud->desc_to_purge);
3625
3626 ch_count = udma_setup_resources(ud);
3627 if (ch_count <= 0)
3628 return ch_count;
3629
3630 spin_lock_init(&ud->lock);
3631 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3632
3633 ud->desc_align = 64;
3634 if (ud->desc_align < dma_get_cache_alignment())
3635 ud->desc_align = dma_get_cache_alignment();
3636
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003637 ret = udma_setup_rx_flush(ud);
3638 if (ret)
3639 return ret;
3640
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003641 for (i = 0; i < ud->tchan_cnt; i++) {
3642 struct udma_tchan *tchan = &ud->tchans[i];
3643
3644 tchan->id = i;
3645 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3646 }
3647
3648 for (i = 0; i < ud->rchan_cnt; i++) {
3649 struct udma_rchan *rchan = &ud->rchans[i];
3650
3651 rchan->id = i;
3652 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3653 }
3654
3655 for (i = 0; i < ud->rflow_cnt; i++) {
3656 struct udma_rflow *rflow = &ud->rflows[i];
3657
3658 rflow->id = i;
3659 }
3660
3661 for (i = 0; i < ch_count; i++) {
3662 struct udma_chan *uc = &ud->channels[i];
3663
3664 uc->ud = ud;
3665 uc->vc.desc_free = udma_desc_free;
3666 uc->id = i;
3667 uc->tchan = NULL;
3668 uc->rchan = NULL;
3669 uc->config.remote_thread_id = -1;
3670 uc->config.dir = DMA_MEM_TO_MEM;
3671 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3672 dev_name(dev), i);
3673
3674 vchan_init(&uc->vc, &ud->ddev);
3675 /* Use custom vchan completion handling */
3676 tasklet_init(&uc->vc.task, udma_vchan_complete,
3677 (unsigned long)&uc->vc);
3678 init_completion(&uc->teardown_completed);
3679 }
3680
3681 ret = dma_async_device_register(&ud->ddev);
3682 if (ret) {
3683 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3684 return ret;
3685 }
3686
3687 platform_set_drvdata(pdev, ud);
3688
3689 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3690 if (ret) {
3691 dev_err(dev, "failed to register of_dma controller\n");
3692 dma_async_device_unregister(&ud->ddev);
3693 }
3694
3695 return ret;
3696}
3697
3698static struct platform_driver udma_driver = {
3699 .driver = {
3700 .name = "ti-udma",
3701 .of_match_table = udma_of_match,
3702 .suppress_bind_attrs = true,
3703 },
3704 .probe = udma_probe,
3705};
3706builtin_platform_driver(udma_driver);
Grygorii Strashkod7024192019-12-23 13:04:51 +02003707
3708/* Private interfaces to UDMA */
3709#include "k3-udma-private.c"