1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/delay.h>
9#include <linux/dmaengine.h>
10#include <linux/dma-mapping.h>
11#include <linux/dmapool.h>
12#include <linux/err.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/list.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_device.h>
22#include <linux/of_irq.h>
23#include <linux/workqueue.h>
24#include <linux/completion.h>
25#include <linux/soc/ti/k3-ringacc.h>
26#include <linux/soc/ti/ti_sci_protocol.h>
27#include <linux/soc/ti/ti_sci_inta_msi.h>
28#include <linux/dma/ti-cppi5.h>
29
30#include "../virt-dma.h"
31#include "k3-udma.h"
32#include "k3-psil-priv.h"
33
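/*
 * Static TR parameters used for PDMA peer endpoints: elsize and elcnt appear
 * to map to the peer RPSTR0 register and bstcnt to RPSTR1; they are written
 * out through the UDMA_CHAN_RT_PEER_STATIC_TR_XY/Z registers in udma_start().
 */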
34struct udma_static_tr {
35 u8 elsize; /* RPSTR0 */
36 u16 elcnt; /* RPSTR0 */
37 u16 bstcnt; /* RPSTR1 */
38};
39
40#define K3_UDMA_MAX_RFLOWS 1024
41#define K3_UDMA_DEFAULT_RING_SIZE 16
42
43/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44#define UDMA_RFLOW_SRCTAG_NONE 0
45#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
46#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
47#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
48
49#define UDMA_RFLOW_DSTTAG_NONE 0
50#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
51#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
52#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
53#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
54
55struct udma_chan;
56
57enum udma_mmr {
58 MMR_GCFG = 0,
59 MMR_RCHANRT,
60 MMR_TCHANRT,
61 MMR_LAST,
62};
63
64static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
65
66struct udma_tchan {
67 void __iomem *reg_rt;
68
69 int id;
70 struct k3_ring *t_ring; /* Transmit ring */
71 struct k3_ring *tc_ring; /* Transmit Completion ring */
72};
73
74struct udma_rflow {
75 int id;
76 struct k3_ring *fd_ring; /* Free Descriptor ring */
77 struct k3_ring *r_ring; /* Receive ring */
78};
79
80struct udma_rchan {
81 void __iomem *reg_rt;
82
83 int id;
84};
85
86#define UDMA_FLAG_PDMA_ACC32 BIT(0)
87#define UDMA_FLAG_PDMA_BURST BIT(1)
88
89struct udma_match_data {
90 u32 psil_base;
91 bool enable_memcpy_support;
92 u32 flags;
93 u32 statictr_z_mask;
94 u32 rchan_oes_offset;
95
96 u8 tpl_levels;
97 u32 level_start_idx[];
98};
99
100struct udma_hwdesc {
101 size_t cppi5_desc_size;
102 void *cppi5_desc_vaddr;
103 dma_addr_t cppi5_desc_paddr;
104
105 /* TR descriptor internal pointers */
106 void *tr_req_base;
107 struct cppi5_tr_resp_t *tr_resp_base;
108};
109
110struct udma_rx_flush {
111 struct udma_hwdesc hwdescs[2];
112
113 size_t buffer_size;
114 void *buffer_vaddr;
115 dma_addr_t buffer_paddr;
116};
117
118struct udma_dev {
119 struct dma_device ddev;
120 struct device *dev;
121 void __iomem *mmrs[MMR_LAST];
122 const struct udma_match_data *match_data;
123
124 size_t desc_align; /* alignment to use for descriptors */
125
126 struct udma_tisci_rm tisci_rm;
127
128 struct k3_ringacc *ringacc;
129
130 struct work_struct purge_work;
131 struct list_head desc_to_purge;
132 spinlock_t lock;
133
134 struct udma_rx_flush rx_flush;
135
136 int tchan_cnt;
137 int echan_cnt;
138 int rchan_cnt;
139 int rflow_cnt;
140 unsigned long *tchan_map;
141 unsigned long *rchan_map;
142 unsigned long *rflow_gp_map;
143 unsigned long *rflow_gp_map_allocated;
144 unsigned long *rflow_in_use;
145
146 struct udma_tchan *tchans;
147 struct udma_rchan *rchans;
148 struct udma_rflow *rflows;
149
150 struct udma_chan *channels;
151 u32 psil_base;
152 u32 atype;
153};
154
155struct udma_desc {
156 struct virt_dma_desc vd;
157
158 bool terminated;
159
160 enum dma_transfer_direction dir;
161
162 struct udma_static_tr static_tr;
163 u32 residue;
164
165 unsigned int sglen;
166 unsigned int desc_idx; /* Only used for cyclic in packet mode */
167 unsigned int tr_idx;
168
169 u32 metadata_size;
170 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
171
172 unsigned int hwdesc_count;
173 struct udma_hwdesc hwdesc[0];
174};
175
176enum udma_chan_state {
177 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
178 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
179 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
180};
181
182struct udma_tx_drain {
183 struct delayed_work work;
184 ktime_t tstamp;
185 u32 residue;
186};
187
188struct udma_chan_config {
189 bool pkt_mode; /* TR or packet */
190 bool needs_epib; /* EPIB is needed for the communication or not */
191 u32 psd_size; /* size of Protocol Specific Data */
192 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
193 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
194 bool notdpkt; /* Suppress sending TDC packet */
195 int remote_thread_id;
196 u32 atype;
197 u32 src_thread;
198 u32 dst_thread;
199 enum psil_endpoint_type ep_type;
200 bool enable_acc32;
201 bool enable_burst;
202 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
203
204 enum dma_transfer_direction dir;
205};
206
207struct udma_chan {
208 struct virt_dma_chan vc;
209 struct dma_slave_config cfg;
210 struct udma_dev *ud;
211 struct udma_desc *desc;
212 struct udma_desc *terminated_desc;
213 struct udma_static_tr static_tr;
214 char *name;
215
216 struct udma_tchan *tchan;
217 struct udma_rchan *rchan;
218 struct udma_rflow *rflow;
219
220 bool psil_paired;
221
222 int irq_num_ring;
223 int irq_num_udma;
224
225 bool cyclic;
226 bool paused;
227
228 enum udma_chan_state state;
229 struct completion teardown_completed;
230
231 struct udma_tx_drain tx_drain;
232
233 u32 bcnt; /* number of bytes completed since the start of the channel */
234
235 /* Channel configuration parameters */
236 struct udma_chan_config config;
237
238 /* dmapool for packet mode descriptors */
239 bool use_dma_pool;
240 struct dma_pool *hdesc_pool;
241
242 u32 id;
243};
244
245static inline struct udma_dev *to_udma_dev(struct dma_device *d)
246{
247 return container_of(d, struct udma_dev, ddev);
248}
249
250static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
251{
252 return container_of(c, struct udma_chan, vc.chan);
253}
254
255static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
256{
257 return container_of(t, struct udma_desc, vd.tx);
258}
259
260/* Generic register access functions */
261static inline u32 udma_read(void __iomem *base, int reg)
262{
263 return readl(base + reg);
264}
265
266static inline void udma_write(void __iomem *base, int reg, u32 val)
267{
268 writel(val, base + reg);
269}
270
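/*
 * Read-modify-write helper: only the bits in @mask are updated, and the
 * register is written back only if the masked update actually changes it.
 */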
271static inline void udma_update_bits(void __iomem *base, int reg,
272 u32 mask, u32 val)
273{
274 u32 tmp, orig;
275
276 orig = readl(base + reg);
277 tmp = orig & ~mask;
278 tmp |= (val & mask);
279
280 if (tmp != orig)
281 writel(tmp, base + reg);
282}
283
284/* TCHANRT */
285static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
286{
287 if (!tchan)
288 return 0;
289 return udma_read(tchan->reg_rt, reg);
290}
291
292static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
293 u32 val)
294{
295 if (!tchan)
296 return;
297 udma_write(tchan->reg_rt, reg, val);
298}
299
300static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
301 u32 mask, u32 val)
302{
303 if (!tchan)
304 return;
305 udma_update_bits(tchan->reg_rt, reg, mask, val);
306}
307
308/* RCHANRT */
309static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
310{
311 if (!rchan)
312 return 0;
313 return udma_read(rchan->reg_rt, reg);
314}
315
316static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
317 u32 val)
318{
319 if (!rchan)
320 return;
321 udma_write(rchan->reg_rt, reg, val);
322}
323
324static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
325 u32 mask, u32 val)
326{
327 if (!rchan)
328 return;
329 udma_update_bits(rchan->reg_rt, reg, mask, val);
330}
331
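/*
 * PSI-L thread pairing/unpairing is delegated to the TISCI (system firmware)
 * resource manager; the destination thread id is marked with
 * K3_PSIL_DST_THREAD_ID_OFFSET before the request is sent.
 */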
332static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
333{
334 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
335
336 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
337 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
338 tisci_rm->tisci_navss_dev_id,
339 src_thread, dst_thread);
340}
341
342static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
343 u32 dst_thread)
344{
345 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
346
347 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
348 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
349 tisci_rm->tisci_navss_dev_id,
350 src_thread, dst_thread);
351}
352
353static void udma_reset_uchan(struct udma_chan *uc)
354{
355 memset(&uc->config, 0, sizeof(uc->config));
356 uc->config.remote_thread_id = -1;
357 uc->state = UDMA_CHAN_IS_IDLE;
358}
359
360static void udma_dump_chan_stdata(struct udma_chan *uc)
361{
362 struct device *dev = uc->ud->dev;
363 u32 offset;
364 int i;
365
366 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
367 dev_dbg(dev, "TCHAN State data:\n");
368 for (i = 0; i < 32; i++) {
369 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
370 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
371 udma_tchanrt_read(uc->tchan, offset));
372 }
373 }
374
375 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
376 dev_dbg(dev, "RCHAN State data:\n");
377 for (i = 0; i < 32; i++) {
378 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
379 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
380 udma_rchanrt_read(uc->rchan, offset));
381 }
382 }
383}
384
385static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
386 int idx)
387{
388 return d->hwdesc[idx].cppi5_desc_paddr;
389}
390
391static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
392{
393 return d->hwdesc[idx].cppi5_desc_vaddr;
394}
395
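/*
 * Look up which descriptor a returned ring address belongs to: the terminated
 * descriptor (if any) is checked first, then the currently active one.
 * Returns NULL if the address matches neither.
 */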
396static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
397 dma_addr_t paddr)
398{
399 struct udma_desc *d = uc->terminated_desc;
400
401 if (d) {
402 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
403 d->desc_idx);
404
405 if (desc_paddr != paddr)
406 d = NULL;
407 }
408
409 if (!d) {
410 d = uc->desc;
411 if (d) {
412 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
413 d->desc_idx);
414
415 if (desc_paddr != paddr)
416 d = NULL;
417 }
418 }
419
420 return d;
421}
422
423static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
424{
425 if (uc->use_dma_pool) {
426 int i;
427
428 for (i = 0; i < d->hwdesc_count; i++) {
429 if (!d->hwdesc[i].cppi5_desc_vaddr)
430 continue;
431
432 dma_pool_free(uc->hdesc_pool,
433 d->hwdesc[i].cppi5_desc_vaddr,
434 d->hwdesc[i].cppi5_desc_paddr);
435
436 d->hwdesc[i].cppi5_desc_vaddr = NULL;
437 }
438 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
439 struct udma_dev *ud = uc->ud;
440
441 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
442 d->hwdesc[0].cppi5_desc_vaddr,
443 d->hwdesc[0].cppi5_desc_paddr);
444
445 d->hwdesc[0].cppi5_desc_vaddr = NULL;
446 }
447}
448
449static void udma_purge_desc_work(struct work_struct *work)
450{
451 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
452 struct virt_dma_desc *vd, *_vd;
453 unsigned long flags;
454 LIST_HEAD(head);
455
456 spin_lock_irqsave(&ud->lock, flags);
457 list_splice_tail_init(&ud->desc_to_purge, &head);
458 spin_unlock_irqrestore(&ud->lock, flags);
459
460 list_for_each_entry_safe(vd, _vd, &head, node) {
461 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
462 struct udma_desc *d = to_udma_desc(&vd->tx);
463
464 udma_free_hwdesc(uc, d);
465 list_del(&vd->node);
466 kfree(d);
467 }
468
469 /* If more to purge, schedule the work again */
470 if (!list_empty(&ud->desc_to_purge))
471 schedule_work(&ud->purge_work);
472}
473
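/*
 * Descriptors not allocated from the dma_pool are queued to desc_to_purge and
 * freed from udma_purge_desc_work(), presumably because this callback can run
 * in atomic context where dma_free_coherent() is not safe to call.
 */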
474static void udma_desc_free(struct virt_dma_desc *vd)
475{
476 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
477 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
478 struct udma_desc *d = to_udma_desc(&vd->tx);
479 unsigned long flags;
480
481 if (uc->terminated_desc == d)
482 uc->terminated_desc = NULL;
483
484 if (uc->use_dma_pool) {
485 udma_free_hwdesc(uc, d);
486 kfree(d);
487 return;
488 }
489
490 spin_lock_irqsave(&ud->lock, flags);
491 list_add_tail(&vd->node, &ud->desc_to_purge);
492 spin_unlock_irqrestore(&ud->lock, flags);
493
494 schedule_work(&ud->purge_work);
495}
496
497static bool udma_is_chan_running(struct udma_chan *uc)
498{
499 u32 trt_ctl = 0;
500 u32 rrt_ctl = 0;
501
502 if (uc->tchan)
503 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_CTL_REG);
504 if (uc->rchan)
505 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_CHAN_RT_CTL_REG);
506
507 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
508 return true;
509
510 return false;
511}
512
513static bool udma_is_chan_paused(struct udma_chan *uc)
514{
515 u32 val, pause_mask;
516
517 switch (uc->config.dir) {
518 case DMA_DEV_TO_MEM:
519 val = udma_rchanrt_read(uc->rchan, UDMA_CHAN_RT_PEER_RT_EN_REG);
520 pause_mask = UDMA_PEER_RT_EN_PAUSE;
521 break;
522 case DMA_MEM_TO_DEV:
523 val = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_PEER_RT_EN_REG);
524 pause_mask = UDMA_PEER_RT_EN_PAUSE;
525 break;
526 case DMA_MEM_TO_MEM:
527 val = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_CTL_REG);
528 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
529 break;
530 default:
531 return false;
532 }
533
534 if (val & pause_mask)
535 return true;
536
537 return false;
538}
539
540static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
541{
542 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
543}
544
545static int udma_push_to_ring(struct udma_chan *uc, int idx)
546{
547 struct udma_desc *d = uc->desc;
548 struct k3_ring *ring = NULL;
549 dma_addr_t paddr;
550
551 switch (uc->config.dir) {
552 case DMA_DEV_TO_MEM:
553 ring = uc->rflow->fd_ring;
554 break;
555 case DMA_MEM_TO_DEV:
556 case DMA_MEM_TO_MEM:
557 ring = uc->tchan->t_ring;
558 break;
559 default:
560 return -EINVAL;
561 }
562
563 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
564 if (idx == -1) {
565 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
566 } else {
567 paddr = udma_curr_cppi5_desc_paddr(d, idx);
568
569 wmb(); /* Ensure that writes are not moved over this point */
570 }
571
572 return k3_ringacc_ring_push(ring, &paddr);
573}
574
575static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
576{
577 if (uc->config.dir != DMA_DEV_TO_MEM)
578 return false;
579
580 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
581 return true;
582
583 return false;
584}
585
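/*
 * Pop one descriptor address from the completion ring. A teardown completion
 * marker is returned as success with *addr holding the marker, while the
 * internal RX flush descriptor is filtered out by returning -ENOENT.
 */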
586static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
587{
588 struct k3_ring *ring = NULL;
589 int ret;
590
591 switch (uc->config.dir) {
592 case DMA_DEV_TO_MEM:
593 ring = uc->rflow->r_ring;
594 break;
595 case DMA_MEM_TO_DEV:
596 case DMA_MEM_TO_MEM:
597 ring = uc->tchan->tc_ring;
598 break;
599 default:
600 return -ENOENT;
601 }
602
603 ret = k3_ringacc_ring_pop(ring, addr);
604 if (ret)
605 return ret;
606
607 rmb(); /* Ensure that reads are not moved before this point */
608
609 /* Teardown completion */
610 if (cppi5_desc_is_tdcm(*addr))
611 return 0;
612
613 /* Check for flush descriptor */
614 if (udma_desc_is_rx_flush(uc, *addr))
615 return -ENOENT;
616
617 return 0;
618}
619
620static void udma_reset_rings(struct udma_chan *uc)
621{
622 struct k3_ring *ring1 = NULL;
623 struct k3_ring *ring2 = NULL;
624
625 switch (uc->config.dir) {
626 case DMA_DEV_TO_MEM:
627 if (uc->rchan) {
628 ring1 = uc->rflow->fd_ring;
629 ring2 = uc->rflow->r_ring;
630 }
631 break;
632 case DMA_MEM_TO_DEV:
633 case DMA_MEM_TO_MEM:
634 if (uc->tchan) {
635 ring1 = uc->tchan->t_ring;
636 ring2 = uc->tchan->tc_ring;
637 }
638 break;
639 default:
640 break;
641 }
642
643 if (ring1)
644 k3_ringacc_ring_reset_dma(ring1,
645 k3_ringacc_ring_get_occ(ring1));
646 if (ring2)
647 k3_ringacc_ring_reset(ring2);
648
649 /* make sure we are not leaking memory via a stalled descriptor */
650 if (uc->terminated_desc) {
651 udma_desc_free(&uc->terminated_desc->vd);
652 uc->terminated_desc = NULL;
653 }
654}
655
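/*
 * The channel real-time byte/packet counters are reset by writing the value
 * that was just read back into each counter register.
 */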
656static void udma_reset_counters(struct udma_chan *uc)
657{
658 u32 val;
659
660 if (uc->tchan) {
661 val = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_BCNT_REG);
662 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_BCNT_REG, val);
663
664 val = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_SBCNT_REG);
665 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_SBCNT_REG, val);
666
667 val = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_PCNT_REG);
668 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_PCNT_REG, val);
669
670 val = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_PEER_BCNT_REG);
671 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_PEER_BCNT_REG, val);
672 }
673
674 if (uc->rchan) {
675 val = udma_rchanrt_read(uc->rchan, UDMA_CHAN_RT_BCNT_REG);
676 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_BCNT_REG, val);
677
678 val = udma_rchanrt_read(uc->rchan, UDMA_CHAN_RT_SBCNT_REG);
679 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_SBCNT_REG, val);
680
681 val = udma_rchanrt_read(uc->rchan, UDMA_CHAN_RT_PCNT_REG);
682 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_PCNT_REG, val);
683
684 val = udma_rchanrt_read(uc->rchan, UDMA_CHAN_RT_PEER_BCNT_REG);
685 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_PEER_BCNT_REG, val);
686 }
687
688 uc->bcnt = 0;
689}
690
691static int udma_reset_chan(struct udma_chan *uc, bool hard)
692{
693 switch (uc->config.dir) {
694 case DMA_DEV_TO_MEM:
695 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
696 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_CTL_REG, 0);
697 break;
698 case DMA_MEM_TO_DEV:
699 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_CTL_REG, 0);
700 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
701 break;
702 case DMA_MEM_TO_MEM:
703 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_CTL_REG, 0);
704 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_CTL_REG, 0);
705 break;
706 default:
707 return -EINVAL;
708 }
709
710 /* Reset all counters */
711 udma_reset_counters(uc);
712
713 /* Hard reset: re-initialize the channel to reset */
714 if (hard) {
715 struct udma_chan_config ucc_backup;
716 int ret;
717
718 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
719 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
720
721 /* restore the channel configuration */
722 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
723 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
724 if (ret)
725 return ret;
726
727 /*
728 * Setting forced teardown after forced reset helps to recover
729 * the rchan.
730 */
731 if (uc->config.dir == DMA_DEV_TO_MEM)
732 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_CTL_REG,
733 UDMA_CHAN_RT_CTL_EN |
734 UDMA_CHAN_RT_CTL_TDOWN |
735 UDMA_CHAN_RT_CTL_FTDOWN);
736 }
737 uc->state = UDMA_CHAN_IS_IDLE;
738
739 return 0;
740}
741
742static void udma_start_desc(struct udma_chan *uc)
743{
744 struct udma_chan_config *ucc = &uc->config;
745
746 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
747 int i;
748
749 /* Push all descriptors to ring for packet mode cyclic or RX */
750 for (i = 0; i < uc->desc->sglen; i++)
751 udma_push_to_ring(uc, i);
752 } else {
753 udma_push_to_ring(uc, 0);
754 }
755}
756
757static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
758{
759 /* Only PDMAs have staticTR */
760 if (uc->config.ep_type == PSIL_EP_NATIVE)
761 return false;
762
763 /* Check if the staticTR configuration has changed for TX */
764 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
765 return true;
766
767 return false;
768}
769
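/*
 * Take the next queued descriptor, push it to the hardware ring and, if
 * needed, (re)configure and enable the channel. For PDMA peers the static TR
 * parameters are programmed before the channel and its remote peer are
 * enabled.
 */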
770static int udma_start(struct udma_chan *uc)
771{
772 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
773
774 if (!vd) {
775 uc->desc = NULL;
776 return -ENOENT;
777 }
778
779 list_del(&vd->node);
780
781 uc->desc = to_udma_desc(&vd->tx);
782
783 /* Channel is already running and does not need reconfiguration */
784 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
785 udma_start_desc(uc);
786 goto out;
787 }
788
789 /* Make sure that we clear the teardown bit, if it is set */
790 udma_reset_chan(uc, false);
791
792 /* Push descriptors before we start the channel */
793 udma_start_desc(uc);
794
795 switch (uc->desc->dir) {
796 case DMA_DEV_TO_MEM:
797 /* Config remote TR */
798 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
799 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
800 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
801 const struct udma_match_data *match_data =
802 uc->ud->match_data;
803
804 if (uc->config.enable_acc32)
805 val |= PDMA_STATIC_TR_XY_ACC32;
806 if (uc->config.enable_burst)
807 val |= PDMA_STATIC_TR_XY_BURST;
808
809 udma_rchanrt_write(uc->rchan,
810 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, val);
811
812 udma_rchanrt_write(uc->rchan,
813 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
814 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
815 match_data->statictr_z_mask));
816
817 /* save the current staticTR configuration */
818 memcpy(&uc->static_tr, &uc->desc->static_tr,
819 sizeof(uc->static_tr));
820 }
821
822 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_CTL_REG,
823 UDMA_CHAN_RT_CTL_EN);
824
825 /* Enable remote */
826 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_PEER_RT_EN_REG,
827 UDMA_PEER_RT_EN_ENABLE);
828
829 break;
830 case DMA_MEM_TO_DEV:
831 /* Config remote TR */
832 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
833 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
834 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
835
836 if (uc->config.enable_acc32)
837 val |= PDMA_STATIC_TR_XY_ACC32;
838 if (uc->config.enable_burst)
839 val |= PDMA_STATIC_TR_XY_BURST;
840
841 udma_tchanrt_write(uc->tchan,
842 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, val);
843
844 /* save the current staticTR configuration */
845 memcpy(&uc->static_tr, &uc->desc->static_tr,
846 sizeof(uc->static_tr));
847 }
848
849 /* Enable remote */
850 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_PEER_RT_EN_REG,
851 UDMA_PEER_RT_EN_ENABLE);
852
853 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_CTL_REG,
854 UDMA_CHAN_RT_CTL_EN);
855
856 break;
857 case DMA_MEM_TO_MEM:
858 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_CTL_REG,
859 UDMA_CHAN_RT_CTL_EN);
860 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_CTL_REG,
861 UDMA_CHAN_RT_CTL_EN);
862
863 break;
864 default:
865 return -EINVAL;
866 }
867
868 uc->state = UDMA_CHAN_IS_ACTIVE;
869out:
870
871 return 0;
872}
873
874static int udma_stop(struct udma_chan *uc)
875{
876 enum udma_chan_state old_state = uc->state;
877
878 uc->state = UDMA_CHAN_IS_TERMINATING;
879 reinit_completion(&uc->teardown_completed);
880
881 switch (uc->config.dir) {
882 case DMA_DEV_TO_MEM:
883 if (!uc->cyclic && !uc->desc)
884 udma_push_to_ring(uc, -1);
885
886 udma_rchanrt_write(uc->rchan, UDMA_CHAN_RT_PEER_RT_EN_REG,
887 UDMA_PEER_RT_EN_ENABLE |
888 UDMA_PEER_RT_EN_TEARDOWN);
889 break;
890 case DMA_MEM_TO_DEV:
891 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_PEER_RT_EN_REG,
892 UDMA_PEER_RT_EN_ENABLE |
893 UDMA_PEER_RT_EN_FLUSH);
894 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_CTL_REG,
895 UDMA_CHAN_RT_CTL_EN |
896 UDMA_CHAN_RT_CTL_TDOWN);
897 break;
898 case DMA_MEM_TO_MEM:
899 udma_tchanrt_write(uc->tchan, UDMA_CHAN_RT_CTL_REG,
900 UDMA_CHAN_RT_CTL_EN |
901 UDMA_CHAN_RT_CTL_TDOWN);
902 break;
903 default:
904 uc->state = old_state;
905 complete_all(&uc->teardown_completed);
906 return -EINVAL;
907 }
908
909 return 0;
910}
911
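/* Re-arm the completed cyclic packet descriptor and advance to the next one */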
912static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
913{
914 struct udma_desc *d = uc->desc;
915 struct cppi5_host_desc_t *h_desc;
916
917 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
918 cppi5_hdesc_reset_to_original(h_desc);
919 udma_push_to_ring(uc, d->desc_idx);
920 d->desc_idx = (d->desc_idx + 1) % d->sglen;
921}
922
923static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
924{
925 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
926
927 memcpy(d->metadata, h_desc->epib, d->metadata_size);
928}
929
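/*
 * For TX towards a PDMA peer the UDMA side can finish before the peer has
 * drained all data. Compare the peer byte counter with the channel byte
 * counter; if the peer is behind, record the outstanding residue and a
 * timestamp so udma_check_tx_completion() can poll for the real completion.
 */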
930static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
931{
932 u32 peer_bcnt, bcnt;
933
934 /* Only TX towards PDMA is affected */
935 if (uc->config.ep_type == PSIL_EP_NATIVE ||
936 uc->config.dir != DMA_MEM_TO_DEV)
937 return true;
938
939 peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_PEER_BCNT_REG);
940 bcnt = udma_tchanrt_read(uc->tchan, UDMA_CHAN_RT_BCNT_REG);
941
942 /* Transfer is incomplete, store current residue and time stamp */
943 if (peer_bcnt < bcnt) {
944 uc->tx_drain.residue = bcnt - peer_bcnt;
945 uc->tx_drain.tstamp = ktime_get();
946 return false;
947 }
948
949 return true;
950}
951
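/*
 * Delayed-work poll loop for the TX drain case above: the next poll delay is
 * estimated from the rate at which the peer consumed data since the previous
 * check; if no progress was made, re-check in one second.
 */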
952static void udma_check_tx_completion(struct work_struct *work)
953{
954 struct udma_chan *uc = container_of(work, typeof(*uc),
955 tx_drain.work.work);
956 bool desc_done = true;
957 u32 residue_diff;
958 ktime_t time_diff;
959 unsigned long delay;
960
961 while (1) {
962 if (uc->desc) {
963 /* Get previous residue and time stamp */
964 residue_diff = uc->tx_drain.residue;
965 time_diff = uc->tx_drain.tstamp;
966 /*
967 * Get current residue and time stamp or see if
968 * transfer is complete
969 */
970 desc_done = udma_is_desc_really_done(uc, uc->desc);
971 }
972
973 if (!desc_done) {
974 /*
975 * Find the time delta and residue delta w.r.t
976 * previous poll
977 */
978 time_diff = ktime_sub(uc->tx_drain.tstamp,
979 time_diff) + 1;
980 residue_diff -= uc->tx_drain.residue;
981 if (residue_diff) {
982 /*
983 * Try to guess when we should check
984 * next time by calculating rate at
985 * which data is being drained at the
986 * peer device
987 */
988 delay = (time_diff / residue_diff) *
989 uc->tx_drain.residue;
990 } else {
991 /* No progress, check again in 1 second */
992 schedule_delayed_work(&uc->tx_drain.work, HZ);
993 break;
994 }
995
996 usleep_range(ktime_to_us(delay),
997 ktime_to_us(delay) + 10);
998 continue;
999 }
1000
1001 if (uc->desc) {
1002 struct udma_desc *d = uc->desc;
1003
1004 uc->bcnt += d->residue;
1005 udma_start(uc);
1006 vchan_cookie_complete(&d->vd);
1007 break;
1008 }
1009
1010 break;
1011 }
1012}
1013
1014static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1015{
1016 struct udma_chan *uc = data;
1017 struct udma_desc *d;
1018 unsigned long flags;
1019 dma_addr_t paddr = 0;
1020
1021 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1022 return IRQ_HANDLED;
1023
1024 spin_lock_irqsave(&uc->vc.lock, flags);
1025
1026 /* Teardown completion message */
1027 if (cppi5_desc_is_tdcm(paddr)) {
1028 complete_all(&uc->teardown_completed);
1029
1030 if (uc->terminated_desc) {
1031 udma_desc_free(&uc->terminated_desc->vd);
1032 uc->terminated_desc = NULL;
1033 }
1034
1035 if (!uc->desc)
1036 udma_start(uc);
1037
1038 goto out;
1039 }
1040
1041 d = udma_udma_desc_from_paddr(uc, paddr);
1042
1043 if (d) {
1044 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1045 d->desc_idx);
1046 if (desc_paddr != paddr) {
1047 dev_err(uc->ud->dev, "not matching descriptors!\n");
1048 goto out;
1049 }
1050
1051 if (d == uc->desc) {
1052 /* active descriptor */
1053 if (uc->cyclic) {
1054 udma_cyclic_packet_elapsed(uc);
1055 vchan_cyclic_callback(&d->vd);
1056 } else {
1057 if (udma_is_desc_really_done(uc, d)) {
1058 uc->bcnt += d->residue;
1059 udma_start(uc);
1060 vchan_cookie_complete(&d->vd);
1061 } else {
1062 schedule_delayed_work(&uc->tx_drain.work,
1063 0);
1064 }
1065 }
1066 } else {
1067 /*
1068 * terminated descriptor, mark the descriptor as
1069 * completed to update the channel's cookie marker
1070 */
1071 dma_cookie_complete(&d->vd.tx);
1072 }
1073 }
1074out:
1075 spin_unlock_irqrestore(&uc->vc.lock, flags);
1076
1077 return IRQ_HANDLED;
1078}
1079
1080static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1081{
1082 struct udma_chan *uc = data;
1083 struct udma_desc *d;
1084 unsigned long flags;
1085
1086 spin_lock_irqsave(&uc->vc.lock, flags);
1087 d = uc->desc;
1088 if (d) {
1089 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1090
1091 if (uc->cyclic) {
1092 vchan_cyclic_callback(&d->vd);
1093 } else {
1094 /* TODO: figure out the real amount of data */
1095 uc->bcnt += d->residue;
1096 udma_start(uc);
1097 vchan_cookie_complete(&d->vd);
1098 }
1099 }
1100
1101 spin_unlock_irqrestore(&uc->vc.lock, flags);
1102
1103 return IRQ_HANDLED;
1104}
1105
1106/**
1107 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1108 * @ud: UDMA device
1109 * @from: Start the search from this flow id number
1110 * @cnt: Number of consecutive flow ids to allocate
1111 *
1112 * Allocate a range of RX flow ids for future use. These flows can be requested
1113 * only by explicit flow id number. If @from is set to -1 it will try to find
1114 * the first free range. If @from is a positive value it will force allocation
1115 * only of the specified range of flows.
1116 *
1117 * Returns -ENOMEM if a free range cannot be found,
1118 * -EEXIST if the requested range is busy,
1119 * -EINVAL if wrong input values are passed.
1120 * Returns the first flow id of the range on success.
1121 */
1122static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1123{
1124 int start, tmp_from;
1125 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1126
1127 tmp_from = from;
1128 if (tmp_from < 0)
1129 tmp_from = ud->rchan_cnt;
1130 /* default flows can't be allocated and are accessible only by id */
1131 if (tmp_from < ud->rchan_cnt)
1132 return -EINVAL;
1133
1134 if (tmp_from + cnt > ud->rflow_cnt)
1135 return -EINVAL;
1136
1137 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1138 ud->rflow_cnt);
1139
1140 start = bitmap_find_next_zero_area(tmp,
1141 ud->rflow_cnt,
1142 tmp_from, cnt, 0);
1143 if (start >= ud->rflow_cnt)
1144 return -ENOMEM;
1145
1146 if (from >= 0 && start != from)
1147 return -EEXIST;
1148
1149 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1150 return start;
1151}
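/*
 * Illustrative use: __udma_alloc_gp_rflow_range(ud, -1, 4) reserves the first
 * free range of four consecutive GP rflows and returns the id of its first
 * flow; the range is later released with __udma_free_gp_rflow_range().
 */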
1152
1153static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1154{
1155 if (from < ud->rchan_cnt)
1156 return -EINVAL;
1157 if (from + cnt > ud->rflow_cnt)
1158 return -EINVAL;
1159
1160 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1161 return 0;
1162}
1163
1164static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1165{
1166 /*
1167 * An attempt to request an rflow by ID can be made for any rflow that is
1168 * not in use, with the assumption that the caller knows what it is doing.
1169 * TI-SCI FW will perform an additional permission check anyway, so it's
1170 * safe.
1171 */
1172
1173 if (id < 0 || id >= ud->rflow_cnt)
1174 return ERR_PTR(-ENOENT);
1175
1176 if (test_bit(id, ud->rflow_in_use))
1177 return ERR_PTR(-ENOENT);
1178
1179 /* GP rflow has to be allocated first */
1180 if (!test_bit(id, ud->rflow_gp_map) &&
1181 !test_bit(id, ud->rflow_gp_map_allocated))
1182 return ERR_PTR(-EINVAL);
1183
1184 dev_dbg(ud->dev, "get rflow%d\n", id);
1185 set_bit(id, ud->rflow_in_use);
1186 return &ud->rflows[id];
1187}
1188
1189static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1190{
1191 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1192 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1193 return;
1194 }
1195
1196 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1197 clear_bit(rflow->id, ud->rflow_in_use);
1198}
1199
1200#define UDMA_RESERVE_RESOURCE(res) \
1201static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1202 enum udma_tp_level tpl, \
1203 int id) \
1204{ \
1205 if (id >= 0) { \
1206 if (test_bit(id, ud->res##_map)) { \
1207 dev_err(ud->dev, #res "%d is in use\n", id); \
1208 return ERR_PTR(-ENOENT); \
1209 } \
1210 } else { \
1211 int start; \
1212 \
1213 if (tpl >= ud->match_data->tpl_levels) \
1214 tpl = ud->match_data->tpl_levels - 1; \
1215 \
1216 start = ud->match_data->level_start_idx[tpl]; \
1217 \
1218 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1219 start); \
1220 if (id == ud->res##_cnt) { \
1221 return ERR_PTR(-ENOENT); \
1222 } \
1223 } \
1224 \
1225 set_bit(id, ud->res##_map); \
1226 return &ud->res##s[id]; \
1227}
1228
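/*
 * The invocations below expand to __udma_reserve_tchan() and
 * __udma_reserve_rchan(), i.e.:
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       enum udma_tp_level tpl,
 *						       int id);
 *
 * id >= 0 requests that exact channel, id < 0 picks the first free channel
 * starting from the requested throughput level.
 */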
1229UDMA_RESERVE_RESOURCE(tchan);
1230UDMA_RESERVE_RESOURCE(rchan);
1231
1232static int udma_get_tchan(struct udma_chan *uc)
1233{
1234 struct udma_dev *ud = uc->ud;
1235
1236 if (uc->tchan) {
1237 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1238 uc->id, uc->tchan->id);
1239 return 0;
1240 }
1241
1242 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1243
1244 return PTR_ERR_OR_ZERO(uc->tchan);
1245}
1246
1247static int udma_get_rchan(struct udma_chan *uc)
1248{
1249 struct udma_dev *ud = uc->ud;
1250
1251 if (uc->rchan) {
1252 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1253 uc->id, uc->rchan->id);
1254 return 0;
1255 }
1256
1257 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1258
1259 return PTR_ERR_OR_ZERO(uc->rchan);
1260}
1261
1262static int udma_get_chan_pair(struct udma_chan *uc)
1263{
1264 struct udma_dev *ud = uc->ud;
1265 const struct udma_match_data *match_data = ud->match_data;
1266 int chan_id, end;
1267
1268 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1269 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1270 uc->id, uc->tchan->id);
1271 return 0;
1272 }
1273
1274 if (uc->tchan) {
1275 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1276 uc->id, uc->tchan->id);
1277 return -EBUSY;
1278 } else if (uc->rchan) {
1279 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1280 uc->id, uc->rchan->id);
1281 return -EBUSY;
1282 }
1283
1284 /* Can be optimized, but let's have it like this for now */
1285 end = min(ud->tchan_cnt, ud->rchan_cnt);
1286 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1287 chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
1288 for (; chan_id < end; chan_id++) {
1289 if (!test_bit(chan_id, ud->tchan_map) &&
1290 !test_bit(chan_id, ud->rchan_map))
1291 break;
1292 }
1293
1294 if (chan_id == end)
1295 return -ENOENT;
1296
1297 set_bit(chan_id, ud->tchan_map);
1298 set_bit(chan_id, ud->rchan_map);
1299 uc->tchan = &ud->tchans[chan_id];
1300 uc->rchan = &ud->rchans[chan_id];
1301
1302 return 0;
1303}
1304
1305static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1306{
1307 struct udma_dev *ud = uc->ud;
1308
1309 if (!uc->rchan) {
1310 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1311 return -EINVAL;
1312 }
1313
1314 if (uc->rflow) {
1315 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1316 uc->id, uc->rflow->id);
1317 return 0;
1318 }
1319
1320 uc->rflow = __udma_get_rflow(ud, flow_id);
1321
1322 return PTR_ERR_OR_ZERO(uc->rflow);
1323}
1324
1325static void udma_put_rchan(struct udma_chan *uc)
1326{
1327 struct udma_dev *ud = uc->ud;
1328
1329 if (uc->rchan) {
1330 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1331 uc->rchan->id);
1332 clear_bit(uc->rchan->id, ud->rchan_map);
1333 uc->rchan = NULL;
1334 }
1335}
1336
1337static void udma_put_tchan(struct udma_chan *uc)
1338{
1339 struct udma_dev *ud = uc->ud;
1340
1341 if (uc->tchan) {
1342 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1343 uc->tchan->id);
1344 clear_bit(uc->tchan->id, ud->tchan_map);
1345 uc->tchan = NULL;
1346 }
1347}
1348
1349static void udma_put_rflow(struct udma_chan *uc)
1350{
1351 struct udma_dev *ud = uc->ud;
1352
1353 if (uc->rflow) {
1354 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1355 uc->rflow->id);
1356 __udma_put_rflow(ud, uc->rflow);
1357 uc->rflow = NULL;
1358 }
1359}
1360
1361static void udma_free_tx_resources(struct udma_chan *uc)
1362{
1363 if (!uc->tchan)
1364 return;
1365
1366 k3_ringacc_ring_free(uc->tchan->t_ring);
1367 k3_ringacc_ring_free(uc->tchan->tc_ring);
1368 uc->tchan->t_ring = NULL;
1369 uc->tchan->tc_ring = NULL;
1370
1371 udma_put_tchan(uc);
1372}
1373
1374static int udma_alloc_tx_resources(struct udma_chan *uc)
1375{
1376 struct k3_ring_cfg ring_cfg;
1377 struct udma_dev *ud = uc->ud;
1378 int ret;
1379
1380 ret = udma_get_tchan(uc);
1381 if (ret)
1382 return ret;
1383
1384 uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
1385 uc->tchan->id, 0);
1386 if (!uc->tchan->t_ring) {
1387 ret = -EBUSY;
1388 goto err_tx_ring;
1389 }
1390
1391 uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1392 if (!uc->tchan->tc_ring) {
1393 ret = -EBUSY;
1394 goto err_txc_ring;
1395 }
1396
1397 memset(&ring_cfg, 0, sizeof(ring_cfg));
1398 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1399 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1400 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1401
1402 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1403 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1404
1405 if (ret)
1406 goto err_ringcfg;
1407
1408 return 0;
1409
1410err_ringcfg:
1411 k3_ringacc_ring_free(uc->tchan->tc_ring);
1412 uc->tchan->tc_ring = NULL;
1413err_txc_ring:
1414 k3_ringacc_ring_free(uc->tchan->t_ring);
1415 uc->tchan->t_ring = NULL;
1416err_tx_ring:
1417 udma_put_tchan(uc);
1418
1419 return ret;
1420}
1421
1422static void udma_free_rx_resources(struct udma_chan *uc)
1423{
1424 if (!uc->rchan)
1425 return;
1426
1427 if (uc->rflow) {
1428 struct udma_rflow *rflow = uc->rflow;
1429
1430 k3_ringacc_ring_free(rflow->fd_ring);
1431 k3_ringacc_ring_free(rflow->r_ring);
1432 rflow->fd_ring = NULL;
1433 rflow->r_ring = NULL;
1434
1435 udma_put_rflow(uc);
1436 }
1437
1438 udma_put_rchan(uc);
1439}
1440
1441static int udma_alloc_rx_resources(struct udma_chan *uc)
1442{
1443 struct udma_dev *ud = uc->ud;
1444 struct k3_ring_cfg ring_cfg;
1445 struct udma_rflow *rflow;
1446 int fd_ring_id;
1447 int ret;
1448
1449 ret = udma_get_rchan(uc);
1450 if (ret)
1451 return ret;
1452
1453 /* For MEM_TO_MEM we don't need rflow or rings */
1454 if (uc->config.dir == DMA_MEM_TO_MEM)
1455 return 0;
1456
1457 ret = udma_get_rflow(uc, uc->rchan->id);
1458 if (ret) {
1459 ret = -EBUSY;
1460 goto err_rflow;
1461 }
1462
1463 rflow = uc->rflow;
1464 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1465 rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
1466 if (!rflow->fd_ring) {
1467 ret = -EBUSY;
1468 goto err_rx_ring;
1469 }
1470
1471 rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1472 if (!rflow->r_ring) {
1473 ret = -EBUSY;
1474 goto err_rxc_ring;
1475 }
1476
1477 memset(&ring_cfg, 0, sizeof(ring_cfg));
1478
1479 if (uc->config.pkt_mode)
1480 ring_cfg.size = SG_MAX_SEGMENTS;
1481 else
1482 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1483
1484 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1485 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1486
1487 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1488 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1489 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1490
1491 if (ret)
1492 goto err_ringcfg;
1493
1494 return 0;
1495
1496err_ringcfg:
1497 k3_ringacc_ring_free(rflow->r_ring);
1498 rflow->r_ring = NULL;
1499err_rxc_ring:
1500 k3_ringacc_ring_free(rflow->fd_ring);
1501 rflow->fd_ring = NULL;
1502err_rx_ring:
1503 udma_put_rflow(uc);
1504err_rflow:
1505 udma_put_rchan(uc);
1506
1507 return ret;
1508}
1509
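/*
 * The valid_params bitmasks below tell the TISCI firmware which fields of the
 * tx/rx channel configuration requests are actually set by this driver.
 */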
1510#define TISCI_TCHAN_VALID_PARAMS ( \
1511 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1512 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1513 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1514 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1515 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1516 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1517 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1518 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1519
1520#define TISCI_RCHAN_VALID_PARAMS ( \
1521 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1522 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1523 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1524 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1525 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1526 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1527 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1528 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1529 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1530
1531static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1532{
1533 struct udma_dev *ud = uc->ud;
1534 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1535 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1536 struct udma_tchan *tchan = uc->tchan;
1537 struct udma_rchan *rchan = uc->rchan;
1538 int ret = 0;
1539
1540 /* Non synchronized - mem to mem type of transfer */
1541 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1542 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1543 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1544
1545 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1546 req_tx.nav_id = tisci_rm->tisci_dev_id;
1547 req_tx.index = tchan->id;
1548 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1549 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1550 req_tx.txcq_qnum = tc_ring;
1551 req_tx.tx_atype = ud->atype;
1552
1553 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1554 if (ret) {
1555 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1556 return ret;
1557 }
1558
1559 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1560 req_rx.nav_id = tisci_rm->tisci_dev_id;
1561 req_rx.index = rchan->id;
1562 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1563 req_rx.rxcq_qnum = tc_ring;
1564 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1565 req_rx.rx_atype = ud->atype;
1566
1567 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1568 if (ret)
1569 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1570
1571 return ret;
1572}
1573
1574static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1575{
1576 struct udma_dev *ud = uc->ud;
1577 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1578 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1579 struct udma_tchan *tchan = uc->tchan;
1580 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1581 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1582 u32 mode, fetch_size;
1583 int ret = 0;
1584
1585 if (uc->config.pkt_mode) {
1586 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1587 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1588 uc->config.psd_size, 0);
1589 } else {
1590 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1591 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1592 }
1593
1594 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1595 req_tx.nav_id = tisci_rm->tisci_dev_id;
1596 req_tx.index = tchan->id;
1597 req_tx.tx_chan_type = mode;
1598 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1599 req_tx.tx_fetch_size = fetch_size >> 2;
1600 req_tx.txcq_qnum = tc_ring;
1601 req_tx.tx_atype = uc->config.atype;
1602
1603 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1604 if (ret)
1605 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1606
1607 return ret;
1608}
1609
1610static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1611{
1612 struct udma_dev *ud = uc->ud;
1613 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1614 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1615 struct udma_rchan *rchan = uc->rchan;
1616 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1617 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1618 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1619 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1620 u32 mode, fetch_size;
1621 int ret = 0;
1622
1623 if (uc->config.pkt_mode) {
1624 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1625 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1626 uc->config.psd_size, 0);
1627 } else {
1628 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1629 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1630 }
1631
1632 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1633 req_rx.nav_id = tisci_rm->tisci_dev_id;
1634 req_rx.index = rchan->id;
1635 req_rx.rx_fetch_size = fetch_size >> 2;
1636 req_rx.rxcq_qnum = rx_ring;
1637 req_rx.rx_chan_type = mode;
1638 req_rx.rx_atype = uc->config.atype;
1639
1640 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1641 if (ret) {
1642 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1643 return ret;
1644 }
1645
1646 flow_req.valid_params =
1647 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1648 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1649 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1650 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1651 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1652 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1653 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1654 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1655 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1656 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1657 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1658 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1659 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1660
1661 flow_req.nav_id = tisci_rm->tisci_dev_id;
1662 flow_req.flow_index = rchan->id;
1663
1664 if (uc->config.needs_epib)
1665 flow_req.rx_einfo_present = 1;
1666 else
1667 flow_req.rx_einfo_present = 0;
1668 if (uc->config.psd_size)
1669 flow_req.rx_psinfo_present = 1;
1670 else
1671 flow_req.rx_psinfo_present = 0;
1672 flow_req.rx_error_handling = 1;
1673 flow_req.rx_dest_qnum = rx_ring;
1674 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1675 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1676 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1677 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1678 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1679 flow_req.rx_fdq1_qnum = fd_ring;
1680 flow_req.rx_fdq2_qnum = fd_ring;
1681 flow_req.rx_fdq3_qnum = fd_ring;
1682
1683 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1684
1685 if (ret)
1686 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1687
1688 return 0;
1689}
1690
1691static int udma_alloc_chan_resources(struct dma_chan *chan)
1692{
1693 struct udma_chan *uc = to_udma_chan(chan);
1694 struct udma_dev *ud = to_udma_dev(chan->device);
1695 const struct udma_match_data *match_data = ud->match_data;
1696 struct k3_ring *irq_ring;
1697 u32 irq_udma_idx;
1698 int ret;
1699
1700 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1701 uc->use_dma_pool = true;
1702 /* in case of MEM_TO_MEM we have maximum of two TRs */
1703 if (uc->config.dir == DMA_MEM_TO_MEM) {
1704 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1705 sizeof(struct cppi5_tr_type15_t), 2);
1706 uc->config.pkt_mode = false;
1707 }
1708 }
1709
1710 if (uc->use_dma_pool) {
1711 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1712 uc->config.hdesc_size,
1713 ud->desc_align,
1714 0);
1715 if (!uc->hdesc_pool) {
1716 dev_err(ud->ddev.dev,
1717 "Descriptor pool allocation failed\n");
1718 uc->use_dma_pool = false;
1719 return -ENOMEM;
1720 }
1721 }
1722
1723 /*
1724 * Make sure that the completion is in a known state:
1725 * No teardown, the channel is idle
1726 */
1727 reinit_completion(&uc->teardown_completed);
1728 complete_all(&uc->teardown_completed);
1729 uc->state = UDMA_CHAN_IS_IDLE;
1730
1731 switch (uc->config.dir) {
1732 case DMA_MEM_TO_MEM:
1733 /* Non synchronized - mem to mem type of transfer */
1734 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1735 uc->id);
1736
1737 ret = udma_get_chan_pair(uc);
1738 if (ret)
1739 return ret;
1740
1741 ret = udma_alloc_tx_resources(uc);
1742 if (ret)
1743 return ret;
1744
1745 ret = udma_alloc_rx_resources(uc);
1746 if (ret) {
1747 udma_free_tx_resources(uc);
1748 return ret;
1749 }
1750
1751 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1752 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1753 K3_PSIL_DST_THREAD_ID_OFFSET;
1754
1755 irq_ring = uc->tchan->tc_ring;
1756 irq_udma_idx = uc->tchan->id;
1757
1758 ret = udma_tisci_m2m_channel_config(uc);
1759 break;
1760 case DMA_MEM_TO_DEV:
1761 /* Slave transfer synchronized - mem to dev (TX) transfer */
1762 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1763 uc->id);
1764
1765 ret = udma_alloc_tx_resources(uc);
1766 if (ret) {
1767 uc->config.remote_thread_id = -1;
1768 return ret;
1769 }
1770
1771 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1772 uc->config.dst_thread = uc->config.remote_thread_id;
1773 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1774
1775 irq_ring = uc->tchan->tc_ring;
1776 irq_udma_idx = uc->tchan->id;
1777
1778 ret = udma_tisci_tx_channel_config(uc);
1779 break;
1780 case DMA_DEV_TO_MEM:
1781 /* Slave transfer synchronized - dev to mem (RX) transfer */
1782 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1783 uc->id);
1784
1785 ret = udma_alloc_rx_resources(uc);
1786 if (ret) {
1787 uc->config.remote_thread_id = -1;
1788 return ret;
1789 }
1790
1791 uc->config.src_thread = uc->config.remote_thread_id;
1792 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1793 K3_PSIL_DST_THREAD_ID_OFFSET;
1794
1795 irq_ring = uc->rflow->r_ring;
1796 irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1797
1798 ret = udma_tisci_rx_channel_config(uc);
1799 break;
1800 default:
1801 /* Can not happen */
1802 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1803 __func__, uc->id, uc->config.dir);
1804 return -EINVAL;
1805 }
1806
1807 /* check if the channel configuration was successful */
1808 if (ret)
1809 goto err_res_free;
1810
1811 if (udma_is_chan_running(uc)) {
1812 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1813 udma_stop(uc);
1814 if (udma_is_chan_running(uc)) {
1815 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
Peter Ujfalusi7ae6d7b2020-05-12 16:45:19 +03001816 ret = -EBUSY;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001817 goto err_res_free;
1818 }
1819 }
1820
1821 /* PSI-L pairing */
1822 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1823 if (ret) {
1824 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1825 uc->config.src_thread, uc->config.dst_thread);
1826 goto err_res_free;
1827 }
1828
1829 uc->psil_paired = true;
1830
1831 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1832 if (uc->irq_num_ring <= 0) {
1833 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1834 k3_ringacc_get_ring_id(irq_ring));
1835 ret = -EINVAL;
1836 goto err_psi_free;
1837 }
1838
1839 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1840 IRQF_TRIGGER_HIGH, uc->name, uc);
1841 if (ret) {
1842 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1843 goto err_irq_free;
1844 }
1845
1846 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1847 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1848 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1849 irq_udma_idx);
1850 if (uc->irq_num_udma <= 0) {
1851 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1852 irq_udma_idx);
1853 free_irq(uc->irq_num_ring, uc);
1854 ret = -EINVAL;
1855 goto err_irq_free;
1856 }
1857
1858 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1859 uc->name, uc);
1860 if (ret) {
1861 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1862 uc->id);
1863 free_irq(uc->irq_num_ring, uc);
1864 goto err_irq_free;
1865 }
1866 } else {
1867 uc->irq_num_udma = 0;
1868 }
1869
1870 udma_reset_rings(uc);
1871
1872 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
1873 udma_check_tx_completion);
1874 return 0;
1875
1876err_irq_free:
1877 uc->irq_num_ring = 0;
1878 uc->irq_num_udma = 0;
1879err_psi_free:
1880 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1881 uc->psil_paired = false;
1882err_res_free:
1883 udma_free_tx_resources(uc);
1884 udma_free_rx_resources(uc);
1885
1886 udma_reset_uchan(uc);
1887
1888 if (uc->use_dma_pool) {
1889 dma_pool_destroy(uc->hdesc_pool);
1890 uc->use_dma_pool = false;
1891 }
1892
1893 return ret;
1894}
1895
1896static int udma_slave_config(struct dma_chan *chan,
1897 struct dma_slave_config *cfg)
1898{
1899 struct udma_chan *uc = to_udma_chan(chan);
1900
1901 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1902
1903 return 0;
1904}
1905
1906static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1907 size_t tr_size, int tr_count,
1908 enum dma_transfer_direction dir)
1909{
1910 struct udma_hwdesc *hwdesc;
1911 struct cppi5_desc_hdr_t *tr_desc;
1912 struct udma_desc *d;
1913 u32 reload_count = 0;
1914 u32 ring_id;
1915
1916 switch (tr_size) {
1917 case 16:
1918 case 32:
1919 case 64:
1920 case 128:
1921 break;
1922 default:
1923 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1924 return NULL;
1925 }
1926
1927 /* We have only one descriptor containing multiple TRs */
1928 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1929 if (!d)
1930 return NULL;
1931
1932 d->sglen = tr_count;
1933
1934 d->hwdesc_count = 1;
1935 hwdesc = &d->hwdesc[0];
1936
1937 /* Allocate memory for DMA ring descriptor */
1938 if (uc->use_dma_pool) {
1939 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1940 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1941 GFP_NOWAIT,
1942 &hwdesc->cppi5_desc_paddr);
1943 } else {
1944 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1945 tr_count);
1946 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1947 uc->ud->desc_align);
1948 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1949 hwdesc->cppi5_desc_size,
1950 &hwdesc->cppi5_desc_paddr,
1951 GFP_NOWAIT);
1952 }
1953
1954 if (!hwdesc->cppi5_desc_vaddr) {
1955 kfree(d);
1956 return NULL;
1957 }
1958
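	/*
	 * Descriptor memory layout used below: the CPPI5 TR descriptor header
	 * (one tr_size slot) is followed by tr_count TR records and then by
	 * the tr_count entry TR response array.
	 */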
1959 /* Start of the TR req records */
1960 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
1961 /* Start address of the TR response array */
1962 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
1963
1964 tr_desc = hwdesc->cppi5_desc_vaddr;
1965
1966 if (uc->cyclic)
1967 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
1968
1969 if (dir == DMA_DEV_TO_MEM)
1970 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1971 else
1972 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
1973
1974 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
1975 cppi5_desc_set_pktids(tr_desc, uc->id,
1976 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
1977 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
1978
1979 return d;
1980}
1981
Peter Ujfalusia9793402020-02-14 11:14:38 +02001982/**
1983 * udma_get_tr_counters - calculate TR counters for a given length
1984 * @len: Length of the transfer
1985 * @align_to: Preferred alignment
1986 * @tr0_cnt0: First TR icnt0
1987 * @tr0_cnt1: First TR icnt1
1988 * @tr1_cnt0: Second (if used) TR icnt0
1989 *
1990 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
1991 * For len >= SZ_64K two TRs are used in a simple way:
1992 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
1993 * Second TR: the remaining length (tr1_cnt0)
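 * Example (illustrative numbers): len = 200000 with a 4 byte aligned
 * address gives tr0_cnt0 = SZ_64K - 4 = 65532, tr0_cnt1 = 3 and
 * tr1_cnt0 = 200000 - 3 * 65532 = 3404, so two TRs are used.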
1994 *
1995 * Returns the number of TRs the length needs (1 or 2)
1996 * -EINVAL if the length cannot be supported
1997 */
1998static int udma_get_tr_counters(size_t len, unsigned long align_to,
1999 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2000{
2001 if (len < SZ_64K) {
2002 *tr0_cnt0 = len;
2003 *tr0_cnt1 = 1;
2004
2005 return 1;
2006 }
2007
2008 if (align_to > 3)
2009 align_to = 3;
2010
2011realign:
2012 *tr0_cnt0 = SZ_64K - BIT(align_to);
2013 if (len / *tr0_cnt0 >= SZ_64K) {
2014 if (align_to) {
2015 align_to--;
2016 goto realign;
2017 }
2018 return -EINVAL;
2019 }
2020
2021 *tr0_cnt1 = len / *tr0_cnt0;
2022 *tr1_cnt0 = len % *tr0_cnt0;
2023
2024 return 2;
2025}
2026
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002027static struct udma_desc *
2028udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2029 unsigned int sglen, enum dma_transfer_direction dir,
2030 unsigned long tx_flags, void *context)
2031{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002032 struct scatterlist *sgent;
2033 struct udma_desc *d;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002034 struct cppi5_tr_type1_t *tr_req = NULL;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002035 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002036 unsigned int i;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002037 size_t tr_size;
2038 int num_tr = 0;
2039 int tr_idx = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002040
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002041 if (!is_slave_direction(dir)) {
2042 dev_err(uc->ud->dev, "Only slave DMA transfers are supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002043 return NULL;
2044 }
2045
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002046 /* estimate the number of TRs we will need */
2047 for_each_sg(sgl, sgent, sglen, i) {
2048 if (sg_dma_len(sgent) < SZ_64K)
2049 num_tr++;
2050 else
2051 num_tr += 2;
2052 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002053
2054 /* Now allocate and setup the descriptor. */
2055 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002056 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002057 if (!d)
2058 return NULL;
2059
2060 d->sglen = sglen;
2061
2062 tr_req = d->hwdesc[0].tr_req_base;
2063 for_each_sg(sgl, sgent, sglen, i) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002064 dma_addr_t sg_addr = sg_dma_address(sgent);
2065
2066 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2067 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2068 if (num_tr < 0) {
2069 dev_err(uc->ud->dev, "size %u is not supported\n",
2070 sg_dma_len(sgent));
2071 udma_free_hwdesc(uc, d);
2072 kfree(d);
2073 return NULL;
2074 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002075
2076 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false,
2077 	      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2078 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2079
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002080 tr_req[tr_idx].addr = sg_addr;
2081 tr_req[tr_idx].icnt0 = tr0_cnt0;
2082 tr_req[tr_idx].icnt1 = tr0_cnt1;
2083 tr_req[tr_idx].dim1 = tr0_cnt0;
2084 tr_idx++;
2085
2086 if (num_tr == 2) {
2087 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2088 false, false,
2089 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2090 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2091 CPPI5_TR_CSF_SUPR_EVT);
2092
2093 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2094 tr_req[tr_idx].icnt0 = tr1_cnt0;
2095 tr_req[tr_idx].icnt1 = 1;
2096 tr_req[tr_idx].dim1 = tr1_cnt0;
2097 tr_idx++;
2098 }
2099
2100 d->residue += sg_dma_len(sgent);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002101 }
2102
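	/*
	 * Keep TR level events suppressed and flag only the last TR as end of
	 * packet (EOP) so the transfer completes as a single unit.
	 */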
Peter Ujfalusibe4054b2020-05-12 16:45:31 +03002103 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2104 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002105
2106 return d;
2107}
2108
2109static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2110 enum dma_slave_buswidth dev_width,
2111 u16 elcnt)
2112{
2113 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2114 return 0;
2115
2116 /* Bus width translates to the element size (ES) */
2117 switch (dev_width) {
2118 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2119 d->static_tr.elsize = 0;
2120 break;
2121 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2122 d->static_tr.elsize = 1;
2123 break;
2124 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2125 d->static_tr.elsize = 2;
2126 break;
2127 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2128 d->static_tr.elsize = 3;
2129 break;
2130 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2131 d->static_tr.elsize = 4;
2132 break;
2133 default: /* not reached */
2134 return -EINVAL;
2135 }
2136
2137 d->static_tr.elcnt = elcnt;
2138
2139 /*
2140 * PDMA must close the packet when the channel is in packet mode.
2141 * For TR mode, when the channel is not cyclic, we also need PDMA to close
2142 * the packet, otherwise the transfer will stall because PDMA holds on to
2143 * the data it has received from the peripheral.
2144 */
2145 if (uc->config.pkt_mode || !uc->cyclic) {
2146 unsigned int div = dev_width * elcnt;
2147
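		/*
		 * Illustrative example: a 4 byte wide peripheral with a burst
		 * of 8 gives div = 32, so a 4096 byte non-cyclic transfer is
		 * closed after bstcnt = 128 bursts.
		 */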
2148 if (uc->cyclic)
2149 d->static_tr.bstcnt = d->residue / d->sglen / div;
2150 else
2151 d->static_tr.bstcnt = d->residue / div;
2152
2153 if (uc->config.dir == DMA_DEV_TO_MEM &&
2154 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2155 return -EINVAL;
2156 } else {
2157 d->static_tr.bstcnt = 0;
2158 }
2159
2160 return 0;
2161}
2162
2163static struct udma_desc *
2164udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2165 unsigned int sglen, enum dma_transfer_direction dir,
2166 unsigned long tx_flags, void *context)
2167{
2168 struct scatterlist *sgent;
2169 struct cppi5_host_desc_t *h_desc = NULL;
2170 struct udma_desc *d;
2171 u32 ring_id;
2172 unsigned int i;
2173
2174 d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2175 if (!d)
2176 return NULL;
2177
2178 d->sglen = sglen;
2179 d->hwdesc_count = sglen;
2180
2181 if (dir == DMA_DEV_TO_MEM)
2182 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2183 else
2184 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2185
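	/*
	 * Build one host descriptor per SG entry: the first one carries the
	 * packet level information (packet length, IDs, return ring), the
	 * following ones are chained to it as host buffer descriptors.
	 */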
2186 for_each_sg(sgl, sgent, sglen, i) {
2187 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2188 dma_addr_t sg_addr = sg_dma_address(sgent);
2189 struct cppi5_host_desc_t *desc;
2190 size_t sg_len = sg_dma_len(sgent);
2191
2192 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2193 GFP_NOWAIT,
2194 &hwdesc->cppi5_desc_paddr);
2195 if (!hwdesc->cppi5_desc_vaddr) {
2196 dev_err(uc->ud->dev,
2197 "descriptor%d allocation failed\n", i);
2198
2199 udma_free_hwdesc(uc, d);
2200 kfree(d);
2201 return NULL;
2202 }
2203
2204 d->residue += sg_len;
2205 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2206 desc = hwdesc->cppi5_desc_vaddr;
2207
2208 if (i == 0) {
2209 cppi5_hdesc_init(desc, 0, 0);
2210 /* Flow and Packet ID */
2211 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2212 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2213 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2214 } else {
2215 cppi5_hdesc_reset_hbdesc(desc);
2216 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2217 }
2218
2219 /* attach the sg buffer to the descriptor */
2220 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2221
2222 /* Attach link as host buffer descriptor */
2223 if (h_desc)
2224 cppi5_hdesc_link_hbdesc(h_desc,
2225 hwdesc->cppi5_desc_paddr);
2226
2227 if (dir == DMA_MEM_TO_DEV)
2228 h_desc = desc;
2229 }
2230
2231 if (d->residue >= SZ_4M) {
2232 dev_err(uc->ud->dev,
2233 "%s: Transfer size %u is over the supported 4M range\n",
2234 __func__, d->residue);
2235 udma_free_hwdesc(uc, d);
2236 kfree(d);
2237 return NULL;
2238 }
2239
2240 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2241 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2242
2243 return d;
2244}
2245
2246static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2247 void *data, size_t len)
2248{
2249 struct udma_desc *d = to_udma_desc(desc);
2250 struct udma_chan *uc = to_udma_chan(desc->chan);
2251 struct cppi5_host_desc_t *h_desc;
2252 u32 psd_size = len;
2253 u32 flags = 0;
2254
2255 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2256 return -ENOTSUPP;
2257
2258 if (!data || len > uc->config.metadata_size)
2259 return -EINVAL;
2260
2261 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2262 return -EINVAL;
2263
2264 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2265 if (d->dir == DMA_MEM_TO_DEV)
2266 memcpy(h_desc->epib, data, len);
2267
2268 if (uc->config.needs_epib)
2269 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2270
2271 d->metadata = data;
2272 d->metadata_size = len;
2273 if (uc->config.needs_epib)
2274 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2275
2276 cppi5_hdesc_update_flags(h_desc, flags);
2277 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2278
2279 return 0;
2280}
2281
2282static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2283 size_t *payload_len, size_t *max_len)
2284{
2285 struct udma_desc *d = to_udma_desc(desc);
2286 struct udma_chan *uc = to_udma_chan(desc->chan);
2287 struct cppi5_host_desc_t *h_desc;
2288
2289 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2290 return ERR_PTR(-ENOTSUPP);
2291
2292 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2293
2294 *max_len = uc->config.metadata_size;
2295
2296 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2297 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2298 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2299
2300 return h_desc->epib;
2301}
2302
2303static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2304 size_t payload_len)
2305{
2306 struct udma_desc *d = to_udma_desc(desc);
2307 struct udma_chan *uc = to_udma_chan(desc->chan);
2308 struct cppi5_host_desc_t *h_desc;
2309 u32 psd_size = payload_len;
2310 u32 flags = 0;
2311
2312 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2313 return -ENOTSUPP;
2314
2315 if (payload_len > uc->config.metadata_size)
2316 return -EINVAL;
2317
2318 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2319 return -EINVAL;
2320
2321 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2322
2323 if (uc->config.needs_epib) {
2324 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2325 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2326 }
2327
2328 cppi5_hdesc_update_flags(h_desc, flags);
2329 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2330
2331 return 0;
2332}
2333
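/*
 * Client metadata handling: these callbacks let DMA clients attach, read and
 * resize the EPIB + PS data words of the first host descriptor of a packet
 * mode transfer (DESC_METADATA_CLIENT / DESC_METADATA_ENGINE modes).
 */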
2334static struct dma_descriptor_metadata_ops metadata_ops = {
2335 .attach = udma_attach_metadata,
2336 .get_ptr = udma_get_metadata_ptr,
2337 .set_len = udma_set_metadata_len,
2338};
2339
2340static struct dma_async_tx_descriptor *
2341udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2342 unsigned int sglen, enum dma_transfer_direction dir,
2343 unsigned long tx_flags, void *context)
2344{
2345 struct udma_chan *uc = to_udma_chan(chan);
2346 enum dma_slave_buswidth dev_width;
2347 struct udma_desc *d;
2348 u32 burst;
2349
2350 if (dir != uc->config.dir) {
2351 dev_err(chan->device->dev,
2352 "%s: chan%d is for %s, not supporting %s\n",
2353 __func__, uc->id,
2354 dmaengine_get_direction_text(uc->config.dir),
2355 dmaengine_get_direction_text(dir));
2356 return NULL;
2357 }
2358
2359 if (dir == DMA_DEV_TO_MEM) {
2360 dev_width = uc->cfg.src_addr_width;
2361 burst = uc->cfg.src_maxburst;
2362 } else if (dir == DMA_MEM_TO_DEV) {
2363 dev_width = uc->cfg.dst_addr_width;
2364 burst = uc->cfg.dst_maxburst;
2365 } else {
2366 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2367 return NULL;
2368 }
2369
2370 if (!burst)
2371 burst = 1;
2372
2373 if (uc->config.pkt_mode)
2374 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2375 context);
2376 else
2377 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2378 context);
2379
2380 if (!d)
2381 return NULL;
2382
2383 d->dir = dir;
2384 d->desc_idx = 0;
2385 d->tr_idx = 0;
2386
2387 /* static TR for remote PDMA */
2388 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2389 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002390 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002391 __func__, d->static_tr.bstcnt);
2392
2393 udma_free_hwdesc(uc, d);
2394 kfree(d);
2395 return NULL;
2396 }
2397
2398 if (uc->config.metadata_size)
2399 d->vd.tx.metadata_ops = &metadata_ops;
2400
2401 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2402}
2403
2404static struct udma_desc *
2405udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2406 size_t buf_len, size_t period_len,
2407 enum dma_transfer_direction dir, unsigned long flags)
2408{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002409 struct udma_desc *d;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002410 size_t tr_size, period_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002411 struct cppi5_tr_type1_t *tr_req;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002412 unsigned int periods = buf_len / period_len;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002413 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2414 unsigned int i;
2415 int num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002416
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002417 if (!is_slave_direction(dir)) {
2418 dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002419 return NULL;
2420 }
2421
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002422 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2423 &tr0_cnt1, &tr1_cnt0);
2424 if (num_tr < 0) {
2425 dev_err(uc->ud->dev, "size %zu is not supported\n",
2426 period_len);
2427 return NULL;
2428 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002429
2430 /* Now allocate and setup the descriptor. */
2431 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002432 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002433 if (!d)
2434 return NULL;
2435
2436 tr_req = d->hwdesc[0].tr_req_base;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002437 period_addr = buf_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002438 for (i = 0; i < periods; i++) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002439 int tr_idx = i * num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002440
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002441 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2442 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2443
2444 tr_req[tr_idx].addr = period_addr;
2445 tr_req[tr_idx].icnt0 = tr0_cnt0;
2446 tr_req[tr_idx].icnt1 = tr0_cnt1;
2447 tr_req[tr_idx].dim1 = tr0_cnt0;
2448
2449 if (num_tr == 2) {
2450 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2451 CPPI5_TR_CSF_SUPR_EVT);
2452 tr_idx++;
2453
2454 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2455 false, false,
2456 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2457
2458 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2459 tr_req[tr_idx].icnt0 = tr1_cnt0;
2460 tr_req[tr_idx].icnt1 = 1;
2461 tr_req[tr_idx].dim1 = tr1_cnt0;
2462 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002463
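		/*
		 * Without DMA_PREP_INTERRUPT the client does not want a
		 * callback per period, so suppress the completion event of
		 * this period's last TR as well.
		 */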
2464 if (!(flags & DMA_PREP_INTERRUPT))
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002465 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002466 CPPI5_TR_CSF_SUPR_EVT);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002467
2468 period_addr += period_len;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002469 }
2470
2471 return d;
2472}
2473
2474static struct udma_desc *
2475udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2476 size_t buf_len, size_t period_len,
2477 enum dma_transfer_direction dir, unsigned long flags)
2478{
2479 struct udma_desc *d;
2480 u32 ring_id;
2481 int i;
2482 int periods = buf_len / period_len;
2483
2484 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2485 return NULL;
2486
2487 if (period_len >= SZ_4M)
2488 return NULL;
2489
2490 d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2491 if (!d)
2492 return NULL;
2493
2494 d->hwdesc_count = periods;
2495
2496 /* TODO: re-check this... */
2497 if (dir == DMA_DEV_TO_MEM)
2498 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2499 else
2500 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2501
2502 for (i = 0; i < periods; i++) {
2503 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2504 dma_addr_t period_addr = buf_addr + (period_len * i);
2505 struct cppi5_host_desc_t *h_desc;
2506
2507 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2508 GFP_NOWAIT,
2509 &hwdesc->cppi5_desc_paddr);
2510 if (!hwdesc->cppi5_desc_vaddr) {
2511 dev_err(uc->ud->dev,
2512 "descriptor%d allocation failed\n", i);
2513
2514 udma_free_hwdesc(uc, d);
2515 kfree(d);
2516 return NULL;
2517 }
2518
2519 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2520 h_desc = hwdesc->cppi5_desc_vaddr;
2521
2522 cppi5_hdesc_init(h_desc, 0, 0);
2523 cppi5_hdesc_set_pktlen(h_desc, period_len);
2524
2525 /* Flow and Packet ID */
2526 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2527 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2528 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2529
2530 /* attach each period to a new descriptor */
2531 cppi5_hdesc_attach_buf(h_desc,
2532 period_addr, period_len,
2533 period_addr, period_len);
2534 }
2535
2536 return d;
2537}
2538
2539static struct dma_async_tx_descriptor *
2540udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2541 size_t period_len, enum dma_transfer_direction dir,
2542 unsigned long flags)
2543{
2544 struct udma_chan *uc = to_udma_chan(chan);
2545 enum dma_slave_buswidth dev_width;
2546 struct udma_desc *d;
2547 u32 burst;
2548
2549 if (dir != uc->config.dir) {
2550 dev_err(chan->device->dev,
2551 "%s: chan%d is for %s, not supporting %s\n",
2552 __func__, uc->id,
2553 dmaengine_get_direction_text(uc->config.dir),
2554 dmaengine_get_direction_text(dir));
2555 return NULL;
2556 }
2557
2558 uc->cyclic = true;
2559
2560 if (dir == DMA_DEV_TO_MEM) {
2561 dev_width = uc->cfg.src_addr_width;
2562 burst = uc->cfg.src_maxburst;
2563 } else if (dir == DMA_MEM_TO_DEV) {
2564 dev_width = uc->cfg.dst_addr_width;
2565 burst = uc->cfg.dst_maxburst;
2566 } else {
2567 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2568 return NULL;
2569 }
2570
2571 if (!burst)
2572 burst = 1;
2573
2574 if (uc->config.pkt_mode)
2575 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2576 dir, flags);
2577 else
2578 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2579 dir, flags);
2580
2581 if (!d)
2582 return NULL;
2583
2584 d->sglen = buf_len / period_len;
2585
2586 d->dir = dir;
2587 d->residue = buf_len;
2588
2589 /* static TR for remote PDMA */
2590 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2591 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002592 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002593 __func__, d->static_tr.bstcnt);
2594
2595 udma_free_hwdesc(uc, d);
2596 kfree(d);
2597 return NULL;
2598 }
2599
2600 if (uc->config.metadata_size)
2601 d->vd.tx.metadata_ops = &metadata_ops;
2602
2603 return vchan_tx_prep(&uc->vc, &d->vd, flags);
2604}
2605
2606static struct dma_async_tx_descriptor *
2607udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2608 size_t len, unsigned long tx_flags)
2609{
2610 struct udma_chan *uc = to_udma_chan(chan);
2611 struct udma_desc *d;
2612 struct cppi5_tr_type15_t *tr_req;
2613 int num_tr;
2614 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2615 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2616
2617 if (uc->config.dir != DMA_MEM_TO_MEM) {
2618 dev_err(chan->device->dev,
2619 "%s: chan%d is for %s, not supporting %s\n",
2620 __func__, uc->id,
2621 dmaengine_get_direction_text(uc->config.dir),
2622 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2623 return NULL;
2624 }
2625
Peter Ujfalusia9793402020-02-14 11:14:38 +02002626 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2627 &tr0_cnt1, &tr1_cnt0);
2628 if (num_tr < 0) {
2629 dev_err(uc->ud->dev, "size %zu is not supported\n",
2630 len);
2631 return NULL;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002632 }
2633
2634 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2635 if (!d)
2636 return NULL;
2637
2638 d->dir = DMA_MEM_TO_MEM;
2639 d->desc_idx = 0;
2640 d->tr_idx = 0;
2641 d->residue = len;
2642
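	/*
	 * The first TR moves tr0_cnt1 rows of tr0_cnt0 bytes; when the length
	 * needed two TRs (see udma_get_tr_counters()), the second TR moves
	 * the remaining tr1_cnt0 byte tail.
	 */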
2643 tr_req = d->hwdesc[0].tr_req_base;
2644
2645 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2646 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2647 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2648
2649 tr_req[0].addr = src;
2650 tr_req[0].icnt0 = tr0_cnt0;
2651 tr_req[0].icnt1 = tr0_cnt1;
2652 tr_req[0].icnt2 = 1;
2653 tr_req[0].icnt3 = 1;
2654 tr_req[0].dim1 = tr0_cnt0;
2655
2656 tr_req[0].daddr = dest;
2657 tr_req[0].dicnt0 = tr0_cnt0;
2658 tr_req[0].dicnt1 = tr0_cnt1;
2659 tr_req[0].dicnt2 = 1;
2660 tr_req[0].dicnt3 = 1;
2661 tr_req[0].ddim1 = tr0_cnt0;
2662
2663 if (num_tr == 2) {
2664 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2665 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2666 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2667
2668 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2669 tr_req[1].icnt0 = tr1_cnt0;
2670 tr_req[1].icnt1 = 1;
2671 tr_req[1].icnt2 = 1;
2672 tr_req[1].icnt3 = 1;
2673
2674 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2675 tr_req[1].dicnt0 = tr1_cnt0;
2676 tr_req[1].dicnt1 = 1;
2677 tr_req[1].dicnt2 = 1;
2678 tr_req[1].dicnt3 = 1;
2679 }
2680
Peter Ujfalusibe4054b2020-05-12 16:45:31 +03002681 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
2682 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002683
2684 if (uc->config.metadata_size)
2685 d->vd.tx.metadata_ops = &metadata_ops;
2686
2687 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2688}
2689
2690static void udma_issue_pending(struct dma_chan *chan)
2691{
2692 struct udma_chan *uc = to_udma_chan(chan);
2693 unsigned long flags;
2694
2695 spin_lock_irqsave(&uc->vc.lock, flags);
2696
2697 /* If we have something pending and no active descriptor, then */
2698 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2699 /*
2700 * start a descriptor if the channel is NOT [marked as
2701 * terminating _and_ it is still running (teardown has not
2702 * completed yet)].
2703 */
2704 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2705 udma_is_chan_running(uc)))
2706 udma_start(uc);
2707 }
2708
2709 spin_unlock_irqrestore(&uc->vc.lock, flags);
2710}
2711
2712static enum dma_status udma_tx_status(struct dma_chan *chan,
2713 dma_cookie_t cookie,
2714 struct dma_tx_state *txstate)
2715{
2716 struct udma_chan *uc = to_udma_chan(chan);
2717 enum dma_status ret;
2718 unsigned long flags;
2719
2720 spin_lock_irqsave(&uc->vc.lock, flags);
2721
2722 ret = dma_cookie_status(chan, cookie, txstate);
2723
Peter Ujfalusi83903182020-02-14 11:14:41 +02002724 if (!udma_is_chan_running(uc))
2725 ret = DMA_COMPLETE;
2726
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002727 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2728 ret = DMA_PAUSED;
2729
2730 if (ret == DMA_COMPLETE || !txstate)
2731 goto out;
2732
2733 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2734 u32 peer_bcnt = 0;
2735 u32 bcnt = 0;
2736 u32 residue = uc->desc->residue;
2737 u32 delay = 0;
2738
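		/*
		 * Residue is estimated from the channel realtime byte
		 * counters: bcnt is what UDMA has moved, peer_bcnt is what
		 * the remote PDMA has seen; their difference is reported as
		 * in-flight bytes.
		 */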
2739 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2740 bcnt = udma_tchanrt_read(uc->tchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002741 UDMA_CHAN_RT_SBCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002742
2743 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2744 peer_bcnt = udma_tchanrt_read(uc->tchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002745 UDMA_CHAN_RT_PEER_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002746
2747 if (bcnt > peer_bcnt)
2748 delay = bcnt - peer_bcnt;
2749 }
2750 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2751 bcnt = udma_rchanrt_read(uc->rchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002752 UDMA_CHAN_RT_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002753
2754 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2755 peer_bcnt = udma_rchanrt_read(uc->rchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002756 UDMA_CHAN_RT_PEER_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002757
2758 if (peer_bcnt > bcnt)
2759 delay = peer_bcnt - bcnt;
2760 }
2761 } else {
2762 bcnt = udma_tchanrt_read(uc->tchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002763 UDMA_CHAN_RT_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002764 }
2765
2766 bcnt -= uc->bcnt;
2767 if (bcnt && !(bcnt % uc->desc->residue))
2768 residue = 0;
2769 else
2770 residue -= bcnt % uc->desc->residue;
2771
2772 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2773 ret = DMA_COMPLETE;
2774 delay = 0;
2775 }
2776
2777 dma_set_residue(txstate, residue);
2778 dma_set_in_flight_bytes(txstate, delay);
2779
2780 } else {
2781 ret = DMA_COMPLETE;
2782 }
2783
2784out:
2785 spin_unlock_irqrestore(&uc->vc.lock, flags);
2786 return ret;
2787}
2788
2789static int udma_pause(struct dma_chan *chan)
2790{
2791 struct udma_chan *uc = to_udma_chan(chan);
2792
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002793 /* pause the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002794 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002795 case DMA_DEV_TO_MEM:
2796 udma_rchanrt_update_bits(uc->rchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002797 UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002798 UDMA_PEER_RT_EN_PAUSE,
2799 UDMA_PEER_RT_EN_PAUSE);
2800 break;
2801 case DMA_MEM_TO_DEV:
2802 udma_tchanrt_update_bits(uc->tchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002803 UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002804 UDMA_PEER_RT_EN_PAUSE,
2805 UDMA_PEER_RT_EN_PAUSE);
2806 break;
2807 case DMA_MEM_TO_MEM:
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002808 udma_tchanrt_update_bits(uc->tchan, UDMA_CHAN_RT_CTL_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002809 UDMA_CHAN_RT_CTL_PAUSE,
2810 UDMA_CHAN_RT_CTL_PAUSE);
2811 break;
2812 default:
2813 return -EINVAL;
2814 }
2815
2816 return 0;
2817}
2818
2819static int udma_resume(struct dma_chan *chan)
2820{
2821 struct udma_chan *uc = to_udma_chan(chan);
2822
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002823 /* resume the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002824 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002825 case DMA_DEV_TO_MEM:
2826 udma_rchanrt_update_bits(uc->rchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002827 UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002828 UDMA_PEER_RT_EN_PAUSE, 0);
2829
2830 break;
2831 case DMA_MEM_TO_DEV:
2832 udma_tchanrt_update_bits(uc->tchan,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002833 UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002834 UDMA_PEER_RT_EN_PAUSE, 0);
2835 break;
2836 case DMA_MEM_TO_MEM:
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002837 udma_tchanrt_update_bits(uc->tchan, UDMA_CHAN_RT_CTL_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002838 UDMA_CHAN_RT_CTL_PAUSE, 0);
2839 break;
2840 default:
2841 return -EINVAL;
2842 }
2843
2844 return 0;
2845}
2846
2847static int udma_terminate_all(struct dma_chan *chan)
2848{
2849 struct udma_chan *uc = to_udma_chan(chan);
2850 unsigned long flags;
2851 LIST_HEAD(head);
2852
2853 spin_lock_irqsave(&uc->vc.lock, flags);
2854
2855 if (udma_is_chan_running(uc))
2856 udma_stop(uc);
2857
2858 if (uc->desc) {
2859 uc->terminated_desc = uc->desc;
2860 uc->desc = NULL;
2861 uc->terminated_desc->terminated = true;
2862 cancel_delayed_work(&uc->tx_drain.work);
2863 }
2864
2865 uc->paused = false;
2866
2867 vchan_get_all_descriptors(&uc->vc, &head);
2868 spin_unlock_irqrestore(&uc->vc.lock, flags);
2869 vchan_dma_desc_free_list(&uc->vc, &head);
2870
2871 return 0;
2872}
2873
2874static void udma_synchronize(struct dma_chan *chan)
2875{
2876 struct udma_chan *uc = to_udma_chan(chan);
2877 unsigned long timeout = msecs_to_jiffies(1000);
2878
2879 vchan_synchronize(&uc->vc);
2880
2881 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2882 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2883 timeout);
2884 if (!timeout) {
2885 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2886 uc->id);
2887 udma_dump_chan_stdata(uc);
2888 udma_reset_chan(uc, true);
2889 }
2890 }
2891
2892 udma_reset_chan(uc, false);
2893 if (udma_is_chan_running(uc))
2894 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2895
2896 cancel_delayed_work_sync(&uc->tx_drain.work);
2897 udma_reset_rings(uc);
2898}
2899
2900static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2901 struct virt_dma_desc *vd,
2902 struct dmaengine_result *result)
2903{
2904 struct udma_chan *uc = to_udma_chan(&vc->chan);
2905 struct udma_desc *d;
2906
2907 if (!vd)
2908 return;
2909
2910 d = to_udma_desc(&vd->tx);
2911
2912 if (d->metadata_size)
2913 udma_fetch_epib(uc, d);
2914
2915 /* Provide residue information for the client */
2916 if (result) {
2917 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2918
2919 if (cppi5_desc_get_type(desc_vaddr) ==
2920 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2921 result->residue = d->residue -
2922 cppi5_hdesc_get_pktlen(desc_vaddr);
2923 if (result->residue)
2924 result->result = DMA_TRANS_ABORTED;
2925 else
2926 result->result = DMA_TRANS_NOERROR;
2927 } else {
2928 result->residue = 0;
2929 result->result = DMA_TRANS_NOERROR;
2930 }
2931 }
2932}
2933
2934/*
2935 * This tasklet handles the completion of a DMA descriptor by
2936 * calling its callback and freeing it.
2937 */
2938static void udma_vchan_complete(unsigned long arg)
2939{
2940 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2941 struct virt_dma_desc *vd, *_vd;
2942 struct dmaengine_desc_callback cb;
2943 LIST_HEAD(head);
2944
2945 spin_lock_irq(&vc->lock);
2946 list_splice_tail_init(&vc->desc_completed, &head);
2947 vd = vc->cyclic;
2948 if (vd) {
2949 vc->cyclic = NULL;
2950 dmaengine_desc_get_callback(&vd->tx, &cb);
2951 } else {
2952 memset(&cb, 0, sizeof(cb));
2953 }
2954 spin_unlock_irq(&vc->lock);
2955
2956 udma_desc_pre_callback(vc, vd, NULL);
2957 dmaengine_desc_callback_invoke(&cb, NULL);
2958
2959 list_for_each_entry_safe(vd, _vd, &head, node) {
2960 struct dmaengine_result result;
2961
2962 dmaengine_desc_get_callback(&vd->tx, &cb);
2963
2964 list_del(&vd->node);
2965
2966 udma_desc_pre_callback(vc, vd, &result);
2967 dmaengine_desc_callback_invoke(&cb, &result);
2968
2969 vchan_vdesc_fini(vd);
2970 }
2971}
2972
2973static void udma_free_chan_resources(struct dma_chan *chan)
2974{
2975 struct udma_chan *uc = to_udma_chan(chan);
2976 struct udma_dev *ud = to_udma_dev(chan->device);
2977
2978 udma_terminate_all(chan);
2979 if (uc->terminated_desc) {
2980 udma_reset_chan(uc, false);
2981 udma_reset_rings(uc);
2982 }
2983
2984 cancel_delayed_work_sync(&uc->tx_drain.work);
2985 destroy_delayed_work_on_stack(&uc->tx_drain.work);
2986
2987 if (uc->irq_num_ring > 0) {
2988 free_irq(uc->irq_num_ring, uc);
2989
2990 uc->irq_num_ring = 0;
2991 }
2992 if (uc->irq_num_udma > 0) {
2993 free_irq(uc->irq_num_udma, uc);
2994
2995 uc->irq_num_udma = 0;
2996 }
2997
2998 /* Release PSI-L pairing */
2999 if (uc->psil_paired) {
3000 navss_psil_unpair(ud, uc->config.src_thread,
3001 uc->config.dst_thread);
3002 uc->psil_paired = false;
3003 }
3004
3005 vchan_free_chan_resources(&uc->vc);
3006 tasklet_kill(&uc->vc.task);
3007
3008 udma_free_tx_resources(uc);
3009 udma_free_rx_resources(uc);
3010 udma_reset_uchan(uc);
3011
3012 if (uc->use_dma_pool) {
3013 dma_pool_destroy(uc->hdesc_pool);
3014 uc->use_dma_pool = false;
3015 }
3016}
3017
3018static struct platform_driver udma_driver;
3019
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003020struct udma_filter_param {
3021 int remote_thread_id;
3022 u32 atype;
3023};
3024
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003025static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
3026{
3027 struct udma_chan_config *ucc;
3028 struct psil_endpoint_config *ep_config;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003029 struct udma_filter_param *filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003030 struct udma_chan *uc;
3031 struct udma_dev *ud;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003032
3033 if (chan->device->dev->driver != &udma_driver.driver)
3034 return false;
3035
3036 uc = to_udma_chan(chan);
3037 ucc = &uc->config;
3038 ud = uc->ud;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003039 filter_param = param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003040
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003041 if (filter_param->atype > 2) {
3042 dev_err(ud->dev, "Invalid channel atype: %u\n",
3043 filter_param->atype);
3044 return false;
3045 }
3046
3047 ucc->remote_thread_id = filter_param->remote_thread_id;
3048 ucc->atype = filter_param->atype;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003049
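	/*
	 * PSI-L destination threads have K3_PSIL_DST_THREAD_ID_OFFSET set in
	 * their thread ID, which tells whether the channel transmits to or
	 * receives from the remote peer.
	 */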
3050 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3051 ucc->dir = DMA_MEM_TO_DEV;
3052 else
3053 ucc->dir = DMA_DEV_TO_MEM;
3054
3055 ep_config = psil_get_ep_config(ucc->remote_thread_id);
3056 if (IS_ERR(ep_config)) {
3057 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3058 ucc->remote_thread_id);
3059 ucc->dir = DMA_MEM_TO_MEM;
3060 ucc->remote_thread_id = -1;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003061 ucc->atype = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003062 return false;
3063 }
3064
3065 ucc->pkt_mode = ep_config->pkt_mode;
3066 ucc->channel_tpl = ep_config->channel_tpl;
3067 ucc->notdpkt = ep_config->notdpkt;
3068 ucc->ep_type = ep_config->ep_type;
3069
3070 if (ucc->ep_type != PSIL_EP_NATIVE) {
3071 const struct udma_match_data *match_data = ud->match_data;
3072
3073 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3074 ucc->enable_acc32 = ep_config->pdma_acc32;
3075 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3076 ucc->enable_burst = ep_config->pdma_burst;
3077 }
3078
3079 ucc->needs_epib = ep_config->needs_epib;
3080 ucc->psd_size = ep_config->psd_size;
3081 ucc->metadata_size =
3082 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3083 ucc->psd_size;
3084
3085 if (ucc->pkt_mode)
3086 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3087 ucc->metadata_size, ud->desc_align);
3088
3089 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3090 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3091
3092 return true;
3093}
3094
3095static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3096 struct of_dma *ofdma)
3097{
3098 struct udma_dev *ud = ofdma->of_dma_data;
3099 dma_cap_mask_t mask = ud->ddev.cap_mask;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003100 struct udma_filter_param filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003101 struct dma_chan *chan;
3102
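	/* #dma-cells is 1 (PSI-L thread ID) or 2 (thread ID + atype) */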
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003103 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003104 return NULL;
3105
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003106 filter_param.remote_thread_id = dma_spec->args[0];
3107 if (dma_spec->args_count == 2)
3108 filter_param.atype = dma_spec->args[1];
3109 else
3110 filter_param.atype = 0;
3111
3112 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3113 ofdma->of_node);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003114 if (!chan) {
3115 dev_err(ud->dev, "get channel fail in %s.\n", __func__);
3116 return ERR_PTR(-EINVAL);
3117 }
3118
3119 return chan;
3120}
3121
3122static struct udma_match_data am654_main_data = {
3123 .psil_base = 0x1000,
3124 .enable_memcpy_support = true,
3125 .statictr_z_mask = GENMASK(11, 0),
3126 .rchan_oes_offset = 0x2000,
3127 .tpl_levels = 2,
3128 .level_start_idx = {
3129 [0] = 8, /* Normal channels */
3130 [1] = 0, /* High Throughput channels */
3131 },
3132};
3133
3134static struct udma_match_data am654_mcu_data = {
3135 .psil_base = 0x6000,
Peter Ujfalusia4e68852020-03-27 16:42:28 +02003136 .enable_memcpy_support = false,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003137 .statictr_z_mask = GENMASK(11, 0),
3138 .rchan_oes_offset = 0x2000,
3139 .tpl_levels = 2,
3140 .level_start_idx = {
3141 [0] = 2, /* Normal channels */
3142 [1] = 0, /* High Throughput channels */
3143 },
3144};
3145
3146static struct udma_match_data j721e_main_data = {
3147 .psil_base = 0x1000,
3148 .enable_memcpy_support = true,
3149 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3150 .statictr_z_mask = GENMASK(23, 0),
3151 .rchan_oes_offset = 0x400,
3152 .tpl_levels = 3,
3153 .level_start_idx = {
3154 [0] = 16, /* Normal channels */
3155 [1] = 4, /* High Throughput channels */
3156 [2] = 0, /* Ultra High Throughput channels */
3157 },
3158};
3159
3160static struct udma_match_data j721e_mcu_data = {
3161 .psil_base = 0x6000,
3162 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3163 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3164 .statictr_z_mask = GENMASK(23, 0),
3165 .rchan_oes_offset = 0x400,
3166 .tpl_levels = 2,
3167 .level_start_idx = {
3168 [0] = 2, /* Normal channels */
3169 [1] = 0, /* High Throughput channels */
3170 },
3171};
3172
3173static const struct of_device_id udma_of_match[] = {
3174 {
3175 .compatible = "ti,am654-navss-main-udmap",
3176 .data = &am654_main_data,
3177 },
3178 {
3179 .compatible = "ti,am654-navss-mcu-udmap",
3180 .data = &am654_mcu_data,
3181 }, {
3182 .compatible = "ti,j721e-navss-main-udmap",
3183 .data = &j721e_main_data,
3184 }, {
3185 .compatible = "ti,j721e-navss-mcu-udmap",
3186 .data = &j721e_mcu_data,
3187 },
3188 { /* Sentinel */ },
3189};
3190
3191static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3192{
3193 struct resource *res;
3194 int i;
3195
3196 for (i = 0; i < MMR_LAST; i++) {
3197 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3198 mmr_names[i]);
3199 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3200 if (IS_ERR(ud->mmrs[i]))
3201 return PTR_ERR(ud->mmrs[i]);
3202 }
3203
3204 return 0;
3205}
3206
3207static int udma_setup_resources(struct udma_dev *ud)
3208{
3209 struct device *dev = ud->dev;
3210 int ch_count, ret, i, j;
3211 u32 cap2, cap3;
3212 struct ti_sci_resource_desc *rm_desc;
3213 struct ti_sci_resource *rm_res, irq_res;
3214 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3215 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3216 "ti,sci-rm-range-rchan",
3217 "ti,sci-rm-range-rflow" };
3218
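	/* Channel and flow counts are read from the GCFG CAP2/CAP3 registers */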
3219 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
3220 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
3221
3222 ud->rflow_cnt = cap3 & 0x3fff;
3223 ud->tchan_cnt = cap2 & 0x1ff;
3224 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
3225 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
3226 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3227
3228 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3229 sizeof(unsigned long), GFP_KERNEL);
3230 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3231 GFP_KERNEL);
3232 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3233 sizeof(unsigned long), GFP_KERNEL);
3234 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3235 GFP_KERNEL);
3236 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3237 sizeof(unsigned long),
3238 GFP_KERNEL);
3239 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3240 BITS_TO_LONGS(ud->rflow_cnt),
3241 sizeof(unsigned long),
3242 GFP_KERNEL);
3243 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3244 sizeof(unsigned long),
3245 GFP_KERNEL);
3246 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3247 GFP_KERNEL);
3248
3249 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3250 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3251 !ud->rflows || !ud->rflow_in_use)
3252 return -ENOMEM;
3253
3254 /*
3255 * RX flows with the same IDs as RX channels are reserved to be used
3256 * as default flows if the remote HW can't generate flow_ids. Those
3257 * RX flows can only be requested explicitly, by ID.
3258 */
3259 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3260
3261 /* by default no GP rflows are assigned to Linux */
3262 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3263
3264 /* Get resource ranges from tisci */
3265 for (i = 0; i < RM_RANGE_LAST; i++)
3266 tisci_rm->rm_ranges[i] =
3267 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3268 tisci_rm->tisci_dev_id,
3269 (char *)range_names[i]);
3270
3271 /* tchan ranges */
3272 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3273 if (IS_ERR(rm_res)) {
3274 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3275 } else {
3276 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3277 for (i = 0; i < rm_res->sets; i++) {
3278 rm_desc = &rm_res->desc[i];
3279 bitmap_clear(ud->tchan_map, rm_desc->start,
3280 rm_desc->num);
3281 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3282 rm_desc->start, rm_desc->num);
3283 }
3284 }
3285 irq_res.sets = rm_res->sets;
3286
3287 /* rchan and matching default flow ranges */
3288 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3289 if (IS_ERR(rm_res)) {
3290 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3291 } else {
3292 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3293 for (i = 0; i < rm_res->sets; i++) {
3294 rm_desc = &rm_res->desc[i];
3295 bitmap_clear(ud->rchan_map, rm_desc->start,
3296 rm_desc->num);
3297 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3298 rm_desc->start, rm_desc->num);
3299 }
3300 }
3301
3302 irq_res.sets += rm_res->sets;
3303 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3304 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3305 for (i = 0; i < rm_res->sets; i++) {
3306 irq_res.desc[i].start = rm_res->desc[i].start;
3307 irq_res.desc[i].num = rm_res->desc[i].num;
3308 }
3309 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3310 for (j = 0; j < rm_res->sets; j++, i++) {
3311 irq_res.desc[i].start = rm_res->desc[j].start +
3312 ud->match_data->rchan_oes_offset;
3313 irq_res.desc[i].num = rm_res->desc[j].num;
3314 }
3315 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3316 kfree(irq_res.desc);
3317 if (ret) {
3318 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3319 return ret;
3320 }
3321
3322 /* GP rflow ranges */
3323 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3324 if (IS_ERR(rm_res)) {
3325 /* all gp flows are assigned exclusively to Linux */
3326 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3327 ud->rflow_cnt - ud->rchan_cnt);
3328 } else {
3329 for (i = 0; i < rm_res->sets; i++) {
3330 rm_desc = &rm_res->desc[i];
3331 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3332 rm_desc->num);
3333 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3334 rm_desc->start, rm_desc->num);
3335 }
3336 }
3337
3338 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3339 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3340 if (!ch_count)
3341 return -ENODEV;
3342
3343 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3344 GFP_KERNEL);
3345 if (!ud->channels)
3346 return -ENOMEM;
3347
3348 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3349 ch_count,
3350 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3351 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3352 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3353 ud->rflow_cnt));
3354
3355 return ch_count;
3356}
3357
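/*
 * Set up the descriptors and scratch buffer used to flush an RX channel:
 * during teardown these are pushed to the free descriptor ring so that
 * in-flight data is drained into the scratch buffer rather than stalling
 * the channel (hwdescs[0] is for TR mode, hwdescs[1] for packet mode).
 */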
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003358static int udma_setup_rx_flush(struct udma_dev *ud)
3359{
3360 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3361 struct cppi5_desc_hdr_t *tr_desc;
3362 struct cppi5_tr_type1_t *tr_req;
3363 struct cppi5_host_desc_t *desc;
3364 struct device *dev = ud->dev;
3365 struct udma_hwdesc *hwdesc;
3366 size_t tr_size;
3367
3368 /* Allocate 1K buffer for discarded data on RX channel teardown */
3369 rx_flush->buffer_size = SZ_1K;
3370 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3371 GFP_KERNEL);
3372 if (!rx_flush->buffer_vaddr)
3373 return -ENOMEM;
3374
3375 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3376 rx_flush->buffer_size,
3377 DMA_TO_DEVICE);
3378 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3379 return -ENOMEM;
3380
3381 /* Set up descriptor to be used for TR mode */
3382 hwdesc = &rx_flush->hwdescs[0];
3383 tr_size = sizeof(struct cppi5_tr_type1_t);
3384 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3385 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3386 ud->desc_align);
3387
3388 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3389 GFP_KERNEL);
3390 if (!hwdesc->cppi5_desc_vaddr)
3391 return -ENOMEM;
3392
3393 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3394 hwdesc->cppi5_desc_size,
3395 DMA_TO_DEVICE);
3396 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3397 return -ENOMEM;
3398
3399 /* Start of the TR req records */
3400 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3401 /* Start address of the TR response array */
3402 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3403
3404 tr_desc = hwdesc->cppi5_desc_vaddr;
3405 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3406 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3407 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3408
3409 tr_req = hwdesc->tr_req_base;
3410 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3411 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3412 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3413
3414 tr_req->addr = rx_flush->buffer_paddr;
3415 tr_req->icnt0 = rx_flush->buffer_size;
3416 tr_req->icnt1 = 1;
3417
Peter Ujfalusi5bbeea32020-05-12 16:45:44 +03003418 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3419 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3420
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003421 /* Set up descriptor to be used for packet mode */
3422 hwdesc = &rx_flush->hwdescs[1];
3423 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3424 CPPI5_INFO0_HDESC_EPIB_SIZE +
3425 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3426 ud->desc_align);
3427
3428 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3429 GFP_KERNEL);
3430 if (!hwdesc->cppi5_desc_vaddr)
3431 return -ENOMEM;
3432
3433 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3434 hwdesc->cppi5_desc_size,
3435 DMA_TO_DEVICE);
3436 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3437 return -ENOMEM;
3438
3439 desc = hwdesc->cppi5_desc_vaddr;
3440 cppi5_hdesc_init(desc, 0, 0);
3441 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3442 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3443
3444 cppi5_hdesc_attach_buf(desc,
3445 rx_flush->buffer_paddr, rx_flush->buffer_size,
3446 rx_flush->buffer_paddr, rx_flush->buffer_size);
3447
3448 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3449 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3450 return 0;
3451}
3452
Peter Ujfalusidb8d9b42020-03-06 16:28:38 +02003453#ifdef CONFIG_DEBUG_FS
3454static void udma_dbg_summary_show_chan(struct seq_file *s,
3455 struct dma_chan *chan)
3456{
3457 struct udma_chan *uc = to_udma_chan(chan);
3458 struct udma_chan_config *ucc = &uc->config;
3459
3460 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
3461 chan->dbg_client_name ?: "in-use");
3462 seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
3463
3464 switch (uc->config.dir) {
3465 case DMA_MEM_TO_MEM:
3466 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
3467 ucc->src_thread, ucc->dst_thread);
3468 break;
3469 case DMA_DEV_TO_MEM:
3470 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
3471 ucc->src_thread, ucc->dst_thread);
3472 break;
3473 case DMA_MEM_TO_DEV:
3474 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
3475 ucc->src_thread, ucc->dst_thread);
3476 break;
3477 default:
3478 seq_printf(s, ")\n");
3479 return;
3480 }
3481
3482 if (ucc->ep_type == PSIL_EP_NATIVE) {
3483 seq_printf(s, "PSI-L Native");
3484 if (ucc->metadata_size) {
3485 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
3486 if (ucc->psd_size)
3487 seq_printf(s, " PSDsize:%u", ucc->psd_size);
3488 seq_printf(s, " ]");
3489 }
3490 } else {
3491 seq_printf(s, "PDMA");
3492 if (ucc->enable_acc32 || ucc->enable_burst)
3493 seq_printf(s, "[%s%s ]",
3494 ucc->enable_acc32 ? " ACC32" : "",
3495 ucc->enable_burst ? " BURST" : "");
3496 }
3497
3498 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
3499}
3500
3501static void udma_dbg_summary_show(struct seq_file *s,
3502 struct dma_device *dma_dev)
3503{
3504 struct dma_chan *chan;
3505
3506 list_for_each_entry(chan, &dma_dev->channels, device_node) {
3507 if (chan->client_count)
3508 udma_dbg_summary_show_chan(s, chan);
3509 }
3510}
3511#endif /* CONFIG_DEBUG_FS */
3512
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003513#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3514 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3515 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3516 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3517 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3518
3519static int udma_probe(struct platform_device *pdev)
3520{
3521 struct device_node *navss_node = pdev->dev.parent->of_node;
3522 struct device *dev = &pdev->dev;
3523 struct udma_dev *ud;
3524 const struct of_device_id *match;
3525 int i, ret;
3526 int ch_count;
3527
3528 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3529 if (ret)
3530 dev_err(dev, "failed to set DMA mask\n");
3531
3532 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3533 if (!ud)
3534 return -ENOMEM;
3535
3536 ret = udma_get_mmrs(pdev, ud);
3537 if (ret)
3538 return ret;
3539
3540 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3541 if (IS_ERR(ud->tisci_rm.tisci))
3542 return PTR_ERR(ud->tisci_rm.tisci);
3543
3544 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3545 &ud->tisci_rm.tisci_dev_id);
3546 if (ret) {
3547 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3548 return ret;
3549 }
3550 pdev->id = ud->tisci_rm.tisci_dev_id;
3551
3552 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3553 &ud->tisci_rm.tisci_navss_dev_id);
3554 if (ret) {
3555 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3556 return ret;
3557 }
3558
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003559 ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
3560 if (!ret && ud->atype > 2) {
3561 dev_err(dev, "Invalid atype: %u\n", ud->atype);
3562 return -EINVAL;
3563 }
3564
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003565 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3566 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3567
3568 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3569 if (IS_ERR(ud->ringacc))
3570 return PTR_ERR(ud->ringacc);
3571
3572 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3573 DOMAIN_BUS_TI_SCI_INTA_MSI);
3574 if (!dev->msi_domain) {
3575 dev_err(dev, "Failed to get MSI domain\n");
3576 return -EPROBE_DEFER;
3577 }
3578
3579 match = of_match_node(udma_of_match, dev->of_node);
3580 if (!match) {
3581 dev_err(dev, "No compatible match found\n");
3582 return -ENODEV;
3583 }
3584 ud->match_data = match->data;
3585
	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);

	ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
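	/*
	 * MEM_TO_MEM (memcpy) is only advertised where the match data
	 * enables it; UDMA implements it with a paired TX/RX channel.
	 */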
	if (ud->match_data->enable_memcpy_support) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

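	/*
	 * Partition the channel/flow resources based on the TI-SCI ranges;
	 * the returned count is the number of DMA channels to expose.
	 */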
	ch_count = udma_setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

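	/*
	 * Descriptors are shared with the hardware: use at least 64 byte
	 * alignment, bumped up to the cache line size when that is larger.
	 */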
	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

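	/*
	 * Set up the dummy descriptor and buffer used to catch stale RX
	 * data when a receive channel is torn down early.
	 */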
	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

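	/* Each channel has a 4K realtime register window within the MMR regions */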
	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

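	/*
	 * Channels start out unbound: direction and the remote PSI-L thread
	 * are only known once a client configures the channel.
	 */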
	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_init(&uc->vc.task, udma_vchan_complete,
			     (unsigned long)&uc->vc);
		init_completion(&uc->teardown_completed);
	}

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

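	/*
	 * Register the OF translation hook so peripherals can reference
	 * their PSI-L thread from DT, roughly (illustrative values only):
	 *
	 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
	 *	dma-names = "tx", "rx";
	 *
	 * On failure, unwind the dmaengine registration done above.
	 */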
	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

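/*
 * The driver is built-in only and cannot be unbound: there is no remove
 * callback and sysfs bind/unbind is suppressed.
 */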
static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};
builtin_platform_driver(udma_driver);

/* Private interfaces to UDMA */
#include "k3-udma-private.c"