// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"

struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rflow {
	int id;
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
};

#define UDMA_FLAG_PDMA_ACC32 BIT(0)
#define UDMA_FLAG_PDMA_BURST BIT(1)

struct udma_match_data {
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
};

struct udma_soc_data {
	u32 rchan_oes_offset;
};

struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;
	dma_addr_t cppi5_desc_paddr;

	/* TR descriptor internal pointers */
	void *tr_req_base;
	struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_rx_flush {
	struct udma_hwdesc hwdescs[2];

	size_t buffer_size;
	void *buffer_vaddr;
	dma_addr_t buffer_paddr;
};

struct udma_dev {
	struct dma_device ddev;
	struct device *dev;
	void __iomem *mmrs[MMR_LAST];
	const struct udma_match_data *match_data;
	const struct udma_soc_data *soc_data;

	u8 tpl_levels;
	u32 tpl_start_idx[3];

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;

	struct k3_ringacc *ringacc;

	struct work_struct purge_work;
	struct list_head desc_to_purge;
	spinlock_t lock;

	struct udma_rx_flush rx_flush;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;
	u32 atype;
};

struct udma_desc {
	struct virt_dma_desc vd;

	bool terminated;

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;
	u32 residue;

	unsigned int sglen;
	unsigned int desc_idx; /* Only used for cyclic in packet mode */
	unsigned int tr_idx;

	u32 metadata_size;
	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[];
};

enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
	struct delayed_work work;
	ktime_t tstamp;
	u32 residue;
};

struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;
	u32 atype;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	bool enable_acc32;
	bool enable_burst;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	enum dma_transfer_direction dir;
};

struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct udma_dev *ud;
	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;
	char *name;

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	bool psil_paired;

	int irq_num_ring;
	int irq_num_udma;

	bool cyclic;
	bool paused;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;
	struct dma_pool *hdesc_pool;

	u32 id;
};

static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}

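/*
 * Per-channel realtime (RT) register helpers below are thin, NULL-safe
 * wrappers around udma_read()/udma_write(): if the channel has no tchan or
 * rchan assigned, reads return 0 and writes are silently dropped.
 */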
/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->tchan)
		return 0;
	return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->tchan)
		return;
	udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->tchan)
		return;
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->rchan)
		return 0;
	return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->rchan)
		return;
	udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->rchan)
		return;
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}

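/*
 * PSI-L thread pairing is delegated to the TI-SCI (System Firmware) resource
 * manager. The destination thread ID always carries the
 * K3_PSIL_DST_THREAD_ID_OFFSET bit so the firmware can tell the two
 * directions of the link apart.
 */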
static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
	struct device *dev = uc->ud->dev;
	u32 offset;
	int i;

	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "TCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
				udma_tchanrt_read(uc, offset));
		}
	}

	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "RCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
				udma_rchanrt_read(uc, offset));
		}
	}
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
						    int idx)
{
	return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
	return d->hwdesc[idx].cppi5_desc_vaddr;
}

static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *d = uc->terminated_desc;

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);

		if (desc_paddr != paddr)
			d = NULL;
	}

	if (!d) {
		d = uc->desc;
		if (d) {
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);

			if (desc_paddr != paddr)
				d = NULL;
		}
	}

	return d;
}

static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
	if (uc->use_dma_pool) {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);

			d->hwdesc[i].cppi5_desc_vaddr = NULL;
		}
	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
		struct udma_dev *ud = uc->ud;

		dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);

		d->hwdesc[0].cppi5_desc_vaddr = NULL;
	}
}

static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
		list_del(&vd->node);
		kfree(d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}

static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}

static bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	if (uc->tchan)
		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
	if (uc->rchan)
		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
	u32 val, pause_mask;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_DEV:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_MEM:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
		break;
	default:
		return false;
	}

	if (val & pause_mask)
		return true;

	return false;
}

static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}

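/*
 * Queue a descriptor to the hardware: for DEV_TO_MEM the descriptor goes to
 * the rflow free-descriptor ring, for MEM_TO_DEV/MEM_TO_MEM to the tchan
 * transmit ring. idx == -1 is a special case used while stopping an RX
 * channel to push the dedicated rx_flush descriptor.
 */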
static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	dma_addr_t paddr;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		return -EINVAL;
	}

	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}

	return k3_ringacc_ring_push(ring, &paddr);
}

static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
	if (uc->config.dir != DMA_DEV_TO_MEM)
		return false;

	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		return -ENOENT;
	}

	ret = k3_ringacc_ring_pop(ring, addr);
	if (ret)
		return ret;

	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;

	return 0;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_ring *ring1 = NULL;
	struct k3_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (uc->rchan) {
			ring1 = uc->rflow->fd_ring;
			ring2 = uc->rflow->r_ring;
		}
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		if (uc->tchan) {
			ring1 = uc->tchan->t_ring;
			ring2 = uc->tchan->tc_ring;
		}
		break;
	default:
		break;
	}

	if (ring1)
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_ringacc_ring_reset(ring2);

	/* make sure we are not leaking memory through a stalled descriptor */
	if (uc->terminated_desc) {
		udma_desc_free(&uc->terminated_desc->vd);
		uc->terminated_desc = NULL;
	}
}

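/*
 * Clear the channel statistics: writing back the value that was just read
 * brings each hardware RT counter back to zero (the counters decrement by
 * the written amount), and the driver's own byte counter is reset as well.
 */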
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: re-initialize the channel to reset */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps to recover
		 * the rchan.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}

static void udma_start_desc(struct udma_chan *uc)
{
	struct udma_chan_config *ucc = &uc->config;

	if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
		int i;

		/* Push all descriptors to ring for packet mode cyclic or RX */
		for (i = 0; i < uc->desc->sglen; i++)
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
	}
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
	/* Only PDMAs have staticTR */
	if (uc->config.ep_type == PSIL_EP_NATIVE)
		return false;

	/* Check if the staticTR configuration has changed for TX */
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
		return true;

	return false;
}

static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			udma_rchanrt_write(uc,
				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* Enable remote */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}

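/*
 * Channel stop is direction specific: an RX channel first queues the rx_flush
 * descriptor (when it is not cyclic and has no active descriptor) and asks
 * the remote peer to tear down, a TX channel requests a peer flush plus a
 * channel teardown, and a memcpy channel only needs the teardown bit on its
 * own tchan.
 */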
static int udma_stop(struct udma_chan *uc)
{
	enum udma_chan_state old_state = uc->state;

	uc->state = UDMA_CHAN_IS_TERMINATING;
	reinit_completion(&uc->teardown_completed);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (!uc->cyclic && !uc->desc)
			udma_push_to_ring(uc, -1);

		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_TEARDOWN);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_FLUSH);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	default:
		uc->state = old_state;
		complete_all(&uc->teardown_completed);
		return -EINVAL;
	}

	return 0;
}

static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
	struct udma_desc *d = uc->desc;
	struct cppi5_host_desc_t *h_desc;

	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
	d->desc_idx = (d->desc_idx + 1) % d->sglen;
}

static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	memcpy(d->metadata, h_desc->epib, d->metadata_size);
}

static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
	u32 peer_bcnt, bcnt;

	/* Only TX towards PDMA is affected */
	if (uc->config.ep_type == PSIL_EP_NATIVE ||
	    uc->config.dir != DMA_MEM_TO_DEV)
		return true;

	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

	/* Transfer is incomplete, store current residue and time stamp */
	if (peer_bcnt < bcnt) {
		uc->tx_drain.residue = bcnt - peer_bcnt;
		uc->tx_drain.tstamp = ktime_get();
		return false;
	}

	return true;
}

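/*
 * Delayed work used when a TX descriptor has been consumed by UDMA but the
 * PDMA peer has not yet drained all bytes. The next poll interval is
 * estimated from the drain rate seen between two polls (time delta per byte
 * times the remaining residue); if no progress is observed, the check is
 * retried after one second.
 */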
static void udma_check_tx_completion(struct work_struct *work)
{
	struct udma_chan *uc = container_of(work, typeof(*uc),
					    tx_drain.work.work);
	bool desc_done = true;
	u32 residue_diff;
	ktime_t time_diff;
	unsigned long delay;

	while (1) {
		if (uc->desc) {
			/* Get previous residue and time stamp */
			residue_diff = uc->tx_drain.residue;
			time_diff = uc->tx_drain.tstamp;
			/*
			 * Get current residue and time stamp or see if
			 * transfer is complete
			 */
			desc_done = udma_is_desc_really_done(uc, uc->desc);
		}

		if (!desc_done) {
			/*
			 * Find the time delta and residue delta w.r.t
			 * previous poll
			 */
			time_diff = ktime_sub(uc->tx_drain.tstamp,
					      time_diff) + 1;
			residue_diff -= uc->tx_drain.residue;
			if (residue_diff) {
				/*
				 * Try to guess when we should check
				 * next time by calculating rate at
				 * which data is being drained at the
				 * peer device
				 */
				delay = (time_diff / residue_diff) *
					uc->tx_drain.residue;
			} else {
				/* No progress, check again in 1 second */
				schedule_delayed_work(&uc->tx_drain.work, HZ);
				break;
			}

			usleep_range(ktime_to_us(delay),
				     ktime_to_us(delay) + 10);
			continue;
		}

		if (uc->desc) {
			struct udma_desc *d = uc->desc;

			uc->bcnt += d->residue;
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
			break;
		}

		break;
	}
}

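/*
 * Ring completion interrupt: pop the completion ring and either acknowledge a
 * teardown completion message, advance a cyclic transfer, complete the active
 * descriptor, or defer to the tx drain work when the peer still holds data.
 */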
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	dma_addr_t paddr = 0;

	if (udma_pop_from_ring(uc, &paddr) || !paddr)
		return IRQ_HANDLED;

	spin_lock(&uc->vc.lock);

	/* Teardown completion message */
	if (cppi5_desc_is_tdcm(paddr)) {
		complete_all(&uc->teardown_completed);

		if (uc->terminated_desc) {
			udma_desc_free(&uc->terminated_desc->vd);
			uc->terminated_desc = NULL;
		}

		if (!uc->desc)
			udma_start(uc);

		goto out;
	}

	d = udma_udma_desc_from_paddr(uc, paddr);

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
			goto out;
		}

		if (d == uc->desc) {
			/* active descriptor */
			if (uc->cyclic) {
				udma_cyclic_packet_elapsed(uc);
				vchan_cyclic_callback(&d->vd);
			} else {
				if (udma_is_desc_really_done(uc, d)) {
					uc->bcnt += d->residue;
					udma_start(uc);
					vchan_cookie_complete(&d->vd);
				} else {
					schedule_delayed_work(&uc->tx_drain.work,
							      0);
				}
			}
		} else {
			/*
			 * terminated descriptor, mark the descriptor as
			 * completed to update the channel's cookie marker
			 */
			dma_cookie_complete(&d->vd.tx);
		}
	}
out:
	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}

static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;

	spin_lock(&uc->vc.lock);
	d = uc->desc;
	if (d) {
		d->tr_idx = (d->tr_idx + 1) % d->sglen;

		if (uc->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else {
			/* TODO: figure out the real amount of data */
			uc->bcnt += d->residue;
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}

/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @ud: UDMA device
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate a range of RX flow ids for future use; these flows can be requested
 * only by explicit flow id number. If @from is set to -1 it will try to find
 * the first free range. If @from is a positive value it will force allocation
 * only of the specified range of flows.
 *
 * Returns -ENOMEM if a free range can't be found.
 * -EEXIST if the requested range is busy.
 * -EINVAL if wrong input values are passed.
 * Returns the first flow id of the range on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	int start, tmp_from;
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	tmp_from = from;
	if (tmp_from < 0)
		tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and are accessible only by id */
	if (tmp_from < ud->rchan_cnt)
		return -EINVAL;

	if (tmp_from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
	if (start >= ud->rflow_cnt)
		return -ENOMEM;

	if (from >= 0 && start != from)
		return -EEXIST;

	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
	return start;
}

static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	if (from < ud->rchan_cnt)
		return -EINVAL;
	if (from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
	return 0;
}

static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
	/*
	 * An attempt to request an rflow by ID can be made for any rflow that
	 * is not in use, with the assumption that the caller knows what it is
	 * doing. TI-SCI FW will perform an additional permission check anyway,
	 * so it is safe.
	 */

	if (id < 0 || id >= ud->rflow_cnt)
		return ERR_PTR(-ENOENT);

	if (test_bit(id, ud->rflow_in_use))
		return ERR_PTR(-ENOENT);

	/* GP rflow has to be allocated first */
	if (!test_bit(id, ud->rflow_gp_map) &&
	    !test_bit(id, ud->rflow_gp_map_allocated))
		return ERR_PTR(-EINVAL);

	dev_dbg(ud->dev, "get rflow%d\n", id);
	set_bit(id, ud->rflow_in_use);
	return &ud->rflows[id];
}

static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
	if (!test_bit(rflow->id, ud->rflow_in_use)) {
		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
		return;
	}

	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
	clear_bit(rflow->id, ud->rflow_in_use);
}
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       enum udma_tp_level tpl,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		int start;						\
									\
		if (tpl >= ud->tpl_levels)				\
			tpl = ud->tpl_levels - 1;			\
									\
		start = ud->tpl_start_idx[tpl];				\
									\
		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
					start);				\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);

	return PTR_ERR_OR_ZERO(uc->tchan);
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);

	return PTR_ERR_OR_ZERO(uc->rchan);
}

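/*
 * MEM_TO_MEM transfers use a tchan and an rchan with the same index; the
 * lookup below returns the first index that is free in both the tchan and
 * the rchan bitmap.
 */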
static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	/* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
	chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
	for (; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (!uc->rchan) {
		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
		return -EINVAL;
	}

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	uc->rflow = __udma_get_rflow(ud, flow_id);

	return PTR_ERR_OR_ZERO(uc->rflow);
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__udma_put_rflow(ud, uc->rflow);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_ringacc_ring_free(uc->tchan->t_ring);
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
					    &uc->tchan->t_ring,
					    &uc->tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;

	ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		struct udma_rflow *rflow = uc->rflow;

		k3_ringacc_ring_free(rflow->fd_ring);
		k3_ringacc_ring_free(rflow->r_ring);
		rflow->fd_ring = NULL;
		rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct k3_ring_cfg ring_cfg;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
					    &rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));

	if (uc->config.pkt_mode)
		ring_cfg.size = SG_MAX_SEGMENTS;
	else
		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;

	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;

	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

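/*
 * valid_params bitmasks for the TI-SCI channel configuration requests: only
 * the request fields flagged here are applied by the firmware, the rest of
 * the message is ignored.
 */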
#define TISCI_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	int ret = 0;

	/* Non synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = ud->atype;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
		return ret;
	}

	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_rx.rx_atype = ud->atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);

	return ret;
}

static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret = 0;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = uc->config.atype;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret = 0;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = fetch_size >> 2;
	req_rx.rxcq_qnum = rx_ring;
	req_rx.rx_chan_type = mode;
	req_rx.rx_atype = uc->config.atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = rchan->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);

	return 0;
}

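/*
 * dmaengine device_alloc_chan_resources callback: reserves the hardware
 * channels and rings for the configured direction, computes the PSI-L source
 * and destination thread IDs and configures the channel through TI-SCI.
 * MEM_TO_MEM channels are switched to TR mode and use a dma_pool sized for a
 * two-TR descriptor.
 */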
1681static int udma_alloc_chan_resources(struct dma_chan *chan)
1682{
1683 struct udma_chan *uc = to_udma_chan(chan);
1684 struct udma_dev *ud = to_udma_dev(chan->device);
Peter Ujfalusif9b0366f52020-09-10 15:43:29 +03001685 const struct udma_soc_data *soc_data = ud->soc_data;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001686 struct k3_ring *irq_ring;
1687 u32 irq_udma_idx;
1688 int ret;
1689
1690 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1691 uc->use_dma_pool = true;
1692 /* in case of MEM_TO_MEM we have maximum of two TRs */
1693 if (uc->config.dir == DMA_MEM_TO_MEM) {
1694 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1695 sizeof(struct cppi5_tr_type15_t), 2);
1696 uc->config.pkt_mode = false;
1697 }
1698 }
1699
1700 if (uc->use_dma_pool) {
1701 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1702 uc->config.hdesc_size,
1703 ud->desc_align,
1704 0);
1705 if (!uc->hdesc_pool) {
1706 dev_err(ud->ddev.dev,
1707 "Descriptor pool allocation failed\n");
1708 uc->use_dma_pool = false;
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001709 ret = -ENOMEM;
1710 goto err_cleanup;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001711 }
1712 }
1713
1714 /*
1715 * Make sure that the completion is in a known state:
1716 * No teardown, the channel is idle
1717 */
1718 reinit_completion(&uc->teardown_completed);
1719 complete_all(&uc->teardown_completed);
1720 uc->state = UDMA_CHAN_IS_IDLE;
1721
1722 switch (uc->config.dir) {
1723 case DMA_MEM_TO_MEM:
1724 /* Non synchronized - mem to mem type of transfer */
1725 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1726 uc->id);
1727
1728 ret = udma_get_chan_pair(uc);
1729 if (ret)
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001730 goto err_cleanup;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001731
1732 ret = udma_alloc_tx_resources(uc);
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001733 if (ret) {
1734 udma_put_rchan(uc);
1735 goto err_cleanup;
1736 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001737
1738 ret = udma_alloc_rx_resources(uc);
1739 if (ret) {
1740 udma_free_tx_resources(uc);
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001741 goto err_cleanup;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001742 }
1743
1744 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1745 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1746 K3_PSIL_DST_THREAD_ID_OFFSET;
1747
1748 irq_ring = uc->tchan->tc_ring;
1749 irq_udma_idx = uc->tchan->id;
1750
1751 ret = udma_tisci_m2m_channel_config(uc);
1752 break;
1753 case DMA_MEM_TO_DEV:
1754 /* Slave transfer synchronized - mem to dev (TX) transfer */
1755 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1756 uc->id);
1757
1758 ret = udma_alloc_tx_resources(uc);
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001759 if (ret)
1760 goto err_cleanup;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001761
1762 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1763 uc->config.dst_thread = uc->config.remote_thread_id;
1764 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1765
1766 irq_ring = uc->tchan->tc_ring;
1767 irq_udma_idx = uc->tchan->id;
1768
1769 ret = udma_tisci_tx_channel_config(uc);
1770 break;
1771 case DMA_DEV_TO_MEM:
1772 /* Slave transfer synchronized - dev to mem (RX) transfer */
1773 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1774 uc->id);
1775
1776 ret = udma_alloc_rx_resources(uc);
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001777 if (ret)
1778 goto err_cleanup;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001779
1780 uc->config.src_thread = uc->config.remote_thread_id;
1781 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1782 K3_PSIL_DST_THREAD_ID_OFFSET;
1783
1784 irq_ring = uc->rflow->r_ring;
Peter Ujfalusif9b0366f52020-09-10 15:43:29 +03001785 irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001786
1787 ret = udma_tisci_rx_channel_config(uc);
1788 break;
1789 default:
1790 /* Can not happen */
1791 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1792 __func__, uc->id, uc->config.dir);
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001793 ret = -EINVAL;
1794 goto err_cleanup;
1795
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001796 }
1797
1798 /* check if the channel configuration was successful */
1799 if (ret)
1800 goto err_res_free;
1801
1802 if (udma_is_chan_running(uc)) {
1803 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
Peter Ujfalusib5b01802020-05-27 10:06:12 +03001804 udma_reset_chan(uc, false);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001805 if (udma_is_chan_running(uc)) {
1806 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
Peter Ujfalusi7ae6d7b2020-05-12 16:45:19 +03001807 ret = -EBUSY;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001808 goto err_res_free;
1809 }
1810 }
1811
1812 /* PSI-L pairing */
1813 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1814 if (ret) {
1815 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1816 uc->config.src_thread, uc->config.dst_thread);
1817 goto err_res_free;
1818 }
1819
1820 uc->psil_paired = true;
1821
1822 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1823 if (uc->irq_num_ring <= 0) {
1824 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1825 k3_ringacc_get_ring_id(irq_ring));
1826 ret = -EINVAL;
1827 goto err_psi_free;
1828 }
1829
1830 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1831 IRQF_TRIGGER_HIGH, uc->name, uc);
1832 if (ret) {
1833 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1834 goto err_irq_free;
1835 }
1836
1837 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1838 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1839 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1840 irq_udma_idx);
1841 if (uc->irq_num_udma <= 0) {
1842 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1843 irq_udma_idx);
1844 free_irq(uc->irq_num_ring, uc);
1845 ret = -EINVAL;
1846 goto err_irq_free;
1847 }
1848
1849 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1850 uc->name, uc);
1851 if (ret) {
1852 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1853 uc->id);
1854 free_irq(uc->irq_num_ring, uc);
1855 goto err_irq_free;
1856 }
1857 } else {
1858 uc->irq_num_udma = 0;
1859 }
1860
1861 udma_reset_rings(uc);
1862
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001863 return 0;
1864
1865err_irq_free:
1866 uc->irq_num_ring = 0;
1867 uc->irq_num_udma = 0;
1868err_psi_free:
1869 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1870 uc->psil_paired = false;
1871err_res_free:
1872 udma_free_tx_resources(uc);
1873 udma_free_rx_resources(uc);
Peter Ujfalusi5a9377c2020-05-27 10:06:11 +03001874err_cleanup:
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02001875 udma_reset_uchan(uc);
1876
1877 if (uc->use_dma_pool) {
1878 dma_pool_destroy(uc->hdesc_pool);
1879 uc->use_dma_pool = false;
1880 }
1881
1882 return ret;
1883}
1884
1885static int udma_slave_config(struct dma_chan *chan,
1886 struct dma_slave_config *cfg)
1887{
1888 struct udma_chan *uc = to_udma_chan(chan);
1889
1890 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1891
1892 return 0;
1893}
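
/*
 * Illustrative client-side usage (not part of this driver): a peripheral
 * driver would typically configure the channel through the generic dmaengine
 * API before preparing descriptors. The FIFO address and parameters below
 * are hypothetical.
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.src_addr = periph_rx_fifo_phys;
 *	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.src_maxburst = 8;
 *	dmaengine_slave_config(chan, &cfg);
 */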
1894
1895static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1896 size_t tr_size, int tr_count,
1897 enum dma_transfer_direction dir)
1898{
1899 struct udma_hwdesc *hwdesc;
1900 struct cppi5_desc_hdr_t *tr_desc;
1901 struct udma_desc *d;
1902 u32 reload_count = 0;
1903 u32 ring_id;
1904
1905 switch (tr_size) {
1906 case 16:
1907 case 32:
1908 case 64:
1909 case 128:
1910 break;
1911 default:
1912 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1913 return NULL;
1914 }
1915
1916 /* We have only one descriptor containing multiple TRs */
1917 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1918 if (!d)
1919 return NULL;
1920
1921 d->sglen = tr_count;
1922
1923 d->hwdesc_count = 1;
1924 hwdesc = &d->hwdesc[0];
1925
1926 /* Allocate memory for DMA ring descriptor */
1927 if (uc->use_dma_pool) {
1928 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1929 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1930 GFP_NOWAIT,
1931 &hwdesc->cppi5_desc_paddr);
1932 } else {
1933 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1934 tr_count);
1935 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1936 uc->ud->desc_align);
1937 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1938 hwdesc->cppi5_desc_size,
1939 &hwdesc->cppi5_desc_paddr,
1940 GFP_NOWAIT);
1941 }
1942
1943 if (!hwdesc->cppi5_desc_vaddr) {
1944 kfree(d);
1945 return NULL;
1946 }
1947
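	/*
	 * Layout of the allocated area: the CPPI5 TR descriptor header
	 * (tr_size bytes) is followed by the tr_count TR request records and
	 * then by the TR response entries.
	 */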
1948 /* Start of the TR req records */
1949 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
1950 /* Start address of the TR response array */
1951 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
1952
1953 tr_desc = hwdesc->cppi5_desc_vaddr;
1954
1955 if (uc->cyclic)
1956 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
1957
1958 if (dir == DMA_DEV_TO_MEM)
1959 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1960 else
1961 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
1962
1963 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
1964 cppi5_desc_set_pktids(tr_desc, uc->id,
1965 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
1966 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
1967
1968 return d;
1969}
1970
Peter Ujfalusia9793402020-02-14 11:14:38 +02001971/**
1972 * udma_get_tr_counters - calculate TR counters for a given length
1973 * @len: Length of the transfer
1974 * @align_to: Preferred alignment
1975 * @tr0_cnt0: First TR icnt0
1976 * @tr0_cnt1: First TR icnt1
1977 * @tr1_cnt0: Second (if used) TR icnt0
1978 *
1979 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
1980 * For len >= SZ_64K two TRs are used in a simple way:
1981 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
1982 * Second TR: the remaining length (tr1_cnt0)
1983 *
1984 * Returns the number of TRs the length needs (1 or 2)
1985 * -EINVAL if the length can not be supported
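 *
 * Example: len = 200000 with align_to = 2 gives tr0_cnt0 = 65532,
 * tr0_cnt1 = 3 and tr1_cnt0 = 3404 (3 * 65532 + 3404 = 200000), and the
 * function returns 2.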
1986 */
1987static int udma_get_tr_counters(size_t len, unsigned long align_to,
1988 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
1989{
1990 if (len < SZ_64K) {
1991 *tr0_cnt0 = len;
1992 *tr0_cnt1 = 1;
1993
1994 return 1;
1995 }
1996
1997 if (align_to > 3)
1998 align_to = 3;
1999
2000realign:
2001 *tr0_cnt0 = SZ_64K - BIT(align_to);
2002 if (len / *tr0_cnt0 >= SZ_64K) {
2003 if (align_to) {
2004 align_to--;
2005 goto realign;
2006 }
2007 return -EINVAL;
2008 }
2009
2010 *tr0_cnt1 = len / *tr0_cnt0;
2011 *tr1_cnt0 = len % *tr0_cnt0;
2012
2013 return 2;
2014}
2015
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002016static struct udma_desc *
2017udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2018 unsigned int sglen, enum dma_transfer_direction dir,
2019 unsigned long tx_flags, void *context)
2020{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002021 struct scatterlist *sgent;
2022 struct udma_desc *d;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002023 struct cppi5_tr_type1_t *tr_req = NULL;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002024 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002025 unsigned int i;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002026 size_t tr_size;
2027 int num_tr = 0;
2028 int tr_idx = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002029
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002030 /* estimate the number of TRs we will need */
2031 for_each_sg(sgl, sgent, sglen, i) {
2032 if (sg_dma_len(sgent) < SZ_64K)
2033 num_tr++;
2034 else
2035 num_tr += 2;
2036 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002037
2038 /* Now allocate and set up the descriptor. */
2039 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002040 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002041 if (!d)
2042 return NULL;
2043
2044 d->sglen = sglen;
2045
2046 tr_req = d->hwdesc[0].tr_req_base;
2047 for_each_sg(sgl, sgent, sglen, i) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002048 dma_addr_t sg_addr = sg_dma_address(sgent);
2049
2050 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2051 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2052 if (num_tr < 0) {
2053 dev_err(uc->ud->dev, "size %u is not supported\n",
2054 sg_dma_len(sgent));
2055 udma_free_hwdesc(uc, d);
2056 kfree(d);
2057 return NULL;
2058 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002059
Peter Ujfalusi33ebffa2020-08-24 15:01:08 +03002060 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2061 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2062 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002063
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002064 tr_req[tr_idx].addr = sg_addr;
2065 tr_req[tr_idx].icnt0 = tr0_cnt0;
2066 tr_req[tr_idx].icnt1 = tr0_cnt1;
2067 tr_req[tr_idx].dim1 = tr0_cnt0;
2068 tr_idx++;
2069
2070 if (num_tr == 2) {
2071 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2072 false, false,
2073 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2074 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2075 CPPI5_TR_CSF_SUPR_EVT);
2076
2077 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2078 tr_req[tr_idx].icnt0 = tr1_cnt0;
2079 tr_req[tr_idx].icnt1 = 1;
2080 tr_req[tr_idx].dim1 = tr1_cnt0;
2081 tr_idx++;
2082 }
2083
2084 d->residue += sg_dma_len(sgent);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002085 }
2086
Peter Ujfalusibe4054b2020-05-12 16:45:31 +03002087 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2088 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002089
2090 return d;
2091}
2092
2093static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2094 enum dma_slave_buswidth dev_width,
2095 u16 elcnt)
2096{
2097 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2098 return 0;
2099
2100 /* Bus width translates to the element size (ES) */
2101 switch (dev_width) {
2102 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2103 d->static_tr.elsize = 0;
2104 break;
2105 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2106 d->static_tr.elsize = 1;
2107 break;
2108 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2109 d->static_tr.elsize = 2;
2110 break;
2111 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2112 d->static_tr.elsize = 3;
2113 break;
2114 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2115 d->static_tr.elsize = 4;
2116 break;
2117 default: /* not reached */
2118 return -EINVAL;
2119 }
2120
2121 d->static_tr.elcnt = elcnt;
2122
2123 /*
2124 * PDMA must close the packet when the channel is in packet mode. For TR
2125 * mode, when the channel is not cyclic, we also need PDMA to close the
2126 * packet, otherwise the transfer will stall because PDMA holds on to the
2127 * data it has received from the peripheral.
2128 */
2129 if (uc->config.pkt_mode || !uc->cyclic) {
2130 unsigned int div = dev_width * elcnt;
2131
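		/*
		 * bstcnt is the number of full bursts (elcnt elements of
		 * dev_width bytes each) after which PDMA is asked to close
		 * the packet: per period for cyclic transfers, per transfer
		 * otherwise.
		 */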
2132 if (uc->cyclic)
2133 d->static_tr.bstcnt = d->residue / d->sglen / div;
2134 else
2135 d->static_tr.bstcnt = d->residue / div;
2136
2137 if (uc->config.dir == DMA_DEV_TO_MEM &&
2138 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2139 return -EINVAL;
2140 } else {
2141 d->static_tr.bstcnt = 0;
2142 }
2143
2144 return 0;
2145}
2146
2147static struct udma_desc *
2148udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2149 unsigned int sglen, enum dma_transfer_direction dir,
2150 unsigned long tx_flags, void *context)
2151{
2152 struct scatterlist *sgent;
2153 struct cppi5_host_desc_t *h_desc = NULL;
2154 struct udma_desc *d;
2155 u32 ring_id;
2156 unsigned int i;
2157
Gustavo A. R. Silvaace52a8c2020-06-19 17:43:34 -05002158 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002159 if (!d)
2160 return NULL;
2161
2162 d->sglen = sglen;
2163 d->hwdesc_count = sglen;
2164
2165 if (dir == DMA_DEV_TO_MEM)
2166 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2167 else
2168 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2169
2170 for_each_sg(sgl, sgent, sglen, i) {
2171 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2172 dma_addr_t sg_addr = sg_dma_address(sgent);
2173 struct cppi5_host_desc_t *desc;
2174 size_t sg_len = sg_dma_len(sgent);
2175
2176 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2177 GFP_NOWAIT,
2178 &hwdesc->cppi5_desc_paddr);
2179 if (!hwdesc->cppi5_desc_vaddr) {
2180 dev_err(uc->ud->dev,
2181 "descriptor%d allocation failed\n", i);
2182
2183 udma_free_hwdesc(uc, d);
2184 kfree(d);
2185 return NULL;
2186 }
2187
2188 d->residue += sg_len;
2189 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2190 desc = hwdesc->cppi5_desc_vaddr;
2191
2192 if (i == 0) {
2193 cppi5_hdesc_init(desc, 0, 0);
2194 /* Flow and Packet ID */
2195 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2196 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2197 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2198 } else {
2199 cppi5_hdesc_reset_hbdesc(desc);
2200 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2201 }
2202
2203 /* attach the sg buffer to the descriptor */
2204 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2205
2206 /* Attach link as host buffer descriptor */
2207 if (h_desc)
2208 cppi5_hdesc_link_hbdesc(h_desc,
2209 hwdesc->cppi5_desc_paddr);
2210
2211 if (dir == DMA_MEM_TO_DEV)
2212 h_desc = desc;
2213 }
2214
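	/*
	 * The packet length field of the CPPI5 host descriptor cannot
	 * describe 4MB or more, so larger aggregate transfers are rejected.
	 */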
2215 if (d->residue >= SZ_4M) {
2216 dev_err(uc->ud->dev,
2217 "%s: Transfer size %u is over the supported 4M range\n",
2218 __func__, d->residue);
2219 udma_free_hwdesc(uc, d);
2220 kfree(d);
2221 return NULL;
2222 }
2223
2224 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2225 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2226
2227 return d;
2228}
2229
2230static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2231 void *data, size_t len)
2232{
2233 struct udma_desc *d = to_udma_desc(desc);
2234 struct udma_chan *uc = to_udma_chan(desc->chan);
2235 struct cppi5_host_desc_t *h_desc;
2236 u32 psd_size = len;
2237 u32 flags = 0;
2238
2239 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2240 return -ENOTSUPP;
2241
2242 if (!data || len > uc->config.metadata_size)
2243 return -EINVAL;
2244
2245 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2246 return -EINVAL;
2247
2248 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2249 if (d->dir == DMA_MEM_TO_DEV)
2250 memcpy(h_desc->epib, data, len);
2251
2252 if (uc->config.needs_epib)
2253 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2254
2255 d->metadata = data;
2256 d->metadata_size = len;
2257 if (uc->config.needs_epib)
2258 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2259
2260 cppi5_hdesc_update_flags(h_desc, flags);
2261 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2262
2263 return 0;
2264}
2265
2266static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2267 size_t *payload_len, size_t *max_len)
2268{
2269 struct udma_desc *d = to_udma_desc(desc);
2270 struct udma_chan *uc = to_udma_chan(desc->chan);
2271 struct cppi5_host_desc_t *h_desc;
2272
2273 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2274 return ERR_PTR(-ENOTSUPP);
2275
2276 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2277
2278 *max_len = uc->config.metadata_size;
2279
2280 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2281 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2282 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2283
2284 return h_desc->epib;
2285}
2286
2287static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2288 size_t payload_len)
2289{
2290 struct udma_desc *d = to_udma_desc(desc);
2291 struct udma_chan *uc = to_udma_chan(desc->chan);
2292 struct cppi5_host_desc_t *h_desc;
2293 u32 psd_size = payload_len;
2294 u32 flags = 0;
2295
2296 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2297 return -ENOTSUPP;
2298
2299 if (payload_len > uc->config.metadata_size)
2300 return -EINVAL;
2301
2302 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2303 return -EINVAL;
2304
2305 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2306
2307 if (uc->config.needs_epib) {
2308 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2309 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2310 }
2311
2312 cppi5_hdesc_update_flags(h_desc, flags);
2313 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2314
2315 return 0;
2316}
2317
2318static struct dma_descriptor_metadata_ops metadata_ops = {
2319 .attach = udma_attach_metadata,
2320 .get_ptr = udma_get_metadata_ptr,
2321 .set_len = udma_set_metadata_len,
2322};
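
/*
 * Sketch of client-side metadata handling (illustrative, not part of this
 * file): on a packet mode channel a client may either attach its own buffer
 * or work directly in the descriptor's EPIB/PS area. md_buf and md_len are
 * hypothetical client variables.
 *
 *	dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *
 * or
 *
 *	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
 *	// fill the returned area, then:
 *	dmaengine_desc_set_metadata_len(desc, payload_len);
 */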
2323
2324static struct dma_async_tx_descriptor *
2325udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2326 unsigned int sglen, enum dma_transfer_direction dir,
2327 unsigned long tx_flags, void *context)
2328{
2329 struct udma_chan *uc = to_udma_chan(chan);
2330 enum dma_slave_buswidth dev_width;
2331 struct udma_desc *d;
2332 u32 burst;
2333
2334 if (dir != uc->config.dir) {
2335 dev_err(chan->device->dev,
2336 "%s: chan%d is for %s, not supporting %s\n",
2337 __func__, uc->id,
2338 dmaengine_get_direction_text(uc->config.dir),
2339 dmaengine_get_direction_text(dir));
2340 return NULL;
2341 }
2342
2343 if (dir == DMA_DEV_TO_MEM) {
2344 dev_width = uc->cfg.src_addr_width;
2345 burst = uc->cfg.src_maxburst;
2346 } else if (dir == DMA_MEM_TO_DEV) {
2347 dev_width = uc->cfg.dst_addr_width;
2348 burst = uc->cfg.dst_maxburst;
2349 } else {
2350 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2351 return NULL;
2352 }
2353
2354 if (!burst)
2355 burst = 1;
2356
2357 if (uc->config.pkt_mode)
2358 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2359 context);
2360 else
2361 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2362 context);
2363
2364 if (!d)
2365 return NULL;
2366
2367 d->dir = dir;
2368 d->desc_idx = 0;
2369 d->tr_idx = 0;
2370
2371 /* static TR for remote PDMA */
2372 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2373 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002374 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002375 __func__, d->static_tr.bstcnt);
2376
2377 udma_free_hwdesc(uc, d);
2378 kfree(d);
2379 return NULL;
2380 }
2381
2382 if (uc->config.metadata_size)
2383 d->vd.tx.metadata_ops = &metadata_ops;
2384
2385 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2386}
2387
2388static struct udma_desc *
2389udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2390 size_t buf_len, size_t period_len,
2391 enum dma_transfer_direction dir, unsigned long flags)
2392{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002393 struct udma_desc *d;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002394 size_t tr_size, period_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002395 struct cppi5_tr_type1_t *tr_req;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002396 unsigned int periods = buf_len / period_len;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002397 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2398 unsigned int i;
2399 int num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002400
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002401 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2402 &tr0_cnt1, &tr1_cnt0);
2403 if (num_tr < 0) {
2404 dev_err(uc->ud->dev, "size %zu is not supported\n",
2405 period_len);
2406 return NULL;
2407 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002408
2409 /* Now allocate and set up the descriptor. */
2410 tr_size = sizeof(struct cppi5_tr_type1_t);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002411 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002412 if (!d)
2413 return NULL;
2414
2415 tr_req = d->hwdesc[0].tr_req_base;
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002416 period_addr = buf_addr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002417 for (i = 0; i < periods; i++) {
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002418 int tr_idx = i * num_tr;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002419
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002420 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2421 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2422
2423 tr_req[tr_idx].addr = period_addr;
2424 tr_req[tr_idx].icnt0 = tr0_cnt0;
2425 tr_req[tr_idx].icnt1 = tr0_cnt1;
2426 tr_req[tr_idx].dim1 = tr0_cnt0;
2427
2428 if (num_tr == 2) {
2429 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2430 CPPI5_TR_CSF_SUPR_EVT);
2431 tr_idx++;
2432
2433 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2434 false, false,
2435 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2436
2437 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2438 tr_req[tr_idx].icnt0 = tr1_cnt0;
2439 tr_req[tr_idx].icnt1 = 1;
2440 tr_req[tr_idx].dim1 = tr1_cnt0;
2441 }
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002442
2443 if (!(flags & DMA_PREP_INTERRUPT))
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002444 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002445 CPPI5_TR_CSF_SUPR_EVT);
Peter Ujfalusi6cf668a2020-02-14 11:14:39 +02002446
2447 period_addr += period_len;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002448 }
2449
2450 return d;
2451}
2452
2453static struct udma_desc *
2454udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2455 size_t buf_len, size_t period_len,
2456 enum dma_transfer_direction dir, unsigned long flags)
2457{
2458 struct udma_desc *d;
2459 u32 ring_id;
2460 int i;
2461 int periods = buf_len / period_len;
2462
2463 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2464 return NULL;
2465
2466 if (period_len >= SZ_4M)
2467 return NULL;
2468
Gustavo A. R. Silvaace52a8c2020-06-19 17:43:34 -05002469 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002470 if (!d)
2471 return NULL;
2472
2473 d->hwdesc_count = periods;
2474
2475 /* TODO: re-check this... */
2476 if (dir == DMA_DEV_TO_MEM)
2477 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2478 else
2479 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2480
2481 for (i = 0; i < periods; i++) {
2482 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2483 dma_addr_t period_addr = buf_addr + (period_len * i);
2484 struct cppi5_host_desc_t *h_desc;
2485
2486 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2487 GFP_NOWAIT,
2488 &hwdesc->cppi5_desc_paddr);
2489 if (!hwdesc->cppi5_desc_vaddr) {
2490 dev_err(uc->ud->dev,
2491 "descriptor%d allocation failed\n", i);
2492
2493 udma_free_hwdesc(uc, d);
2494 kfree(d);
2495 return NULL;
2496 }
2497
2498 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2499 h_desc = hwdesc->cppi5_desc_vaddr;
2500
2501 cppi5_hdesc_init(h_desc, 0, 0);
2502 cppi5_hdesc_set_pktlen(h_desc, period_len);
2503
2504 /* Flow and Packed ID */
2505 /* Flow and Packet ID */
2506 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2507 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2508
2509 /* attach each period to a new descriptor */
2510 cppi5_hdesc_attach_buf(h_desc,
2511 period_addr, period_len,
2512 period_addr, period_len);
2513 }
2514
2515 return d;
2516}
2517
2518static struct dma_async_tx_descriptor *
2519udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2520 size_t period_len, enum dma_transfer_direction dir,
2521 unsigned long flags)
2522{
2523 struct udma_chan *uc = to_udma_chan(chan);
2524 enum dma_slave_buswidth dev_width;
2525 struct udma_desc *d;
2526 u32 burst;
2527
2528 if (dir != uc->config.dir) {
2529 dev_err(chan->device->dev,
2530 "%s: chan%d is for %s, not supporting %s\n",
2531 __func__, uc->id,
2532 dmaengine_get_direction_text(uc->config.dir),
2533 dmaengine_get_direction_text(dir));
2534 return NULL;
2535 }
2536
2537 uc->cyclic = true;
2538
2539 if (dir == DMA_DEV_TO_MEM) {
2540 dev_width = uc->cfg.src_addr_width;
2541 burst = uc->cfg.src_maxburst;
2542 } else if (dir == DMA_MEM_TO_DEV) {
2543 dev_width = uc->cfg.dst_addr_width;
2544 burst = uc->cfg.dst_maxburst;
2545 } else {
2546 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2547 return NULL;
2548 }
2549
2550 if (!burst)
2551 burst = 1;
2552
2553 if (uc->config.pkt_mode)
2554 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2555 dir, flags);
2556 else
2557 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2558 dir, flags);
2559
2560 if (!d)
2561 return NULL;
2562
2563 d->sglen = buf_len / period_len;
2564
2565 d->dir = dir;
2566 d->residue = buf_len;
2567
2568 /* static TR for remote PDMA */
2569 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2570 dev_err(uc->ud->dev,
Colin Ian King6c0157b2020-01-22 09:38:18 +00002571 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002572 __func__, d->static_tr.bstcnt);
2573
2574 udma_free_hwdesc(uc, d);
2575 kfree(d);
2576 return NULL;
2577 }
2578
2579 if (uc->config.metadata_size)
2580 d->vd.tx.metadata_ops = &metadata_ops;
2581
2582 return vchan_tx_prep(&uc->vc, &d->vd, flags);
2583}
2584
2585static struct dma_async_tx_descriptor *
2586udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2587 size_t len, unsigned long tx_flags)
2588{
2589 struct udma_chan *uc = to_udma_chan(chan);
2590 struct udma_desc *d;
2591 struct cppi5_tr_type15_t *tr_req;
2592 int num_tr;
2593 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2594 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2595
2596 if (uc->config.dir != DMA_MEM_TO_MEM) {
2597 dev_err(chan->device->dev,
2598 "%s: chan%d is for %s, not supporting %s\n",
2599 __func__, uc->id,
2600 dmaengine_get_direction_text(uc->config.dir),
2601 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2602 return NULL;
2603 }
2604
Peter Ujfalusia9793402020-02-14 11:14:38 +02002605 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2606 &tr0_cnt1, &tr1_cnt0);
2607 if (num_tr < 0) {
2608 dev_err(uc->ud->dev, "size %zu is not supported\n",
2609 len);
2610 return NULL;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002611 }
2612
2613 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2614 if (!d)
2615 return NULL;
2616
2617 d->dir = DMA_MEM_TO_MEM;
2618 d->desc_idx = 0;
2619 d->tr_idx = 0;
2620 d->residue = len;
2621
2622 tr_req = d->hwdesc[0].tr_req_base;
2623
2624 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2625 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2626 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2627
2628 tr_req[0].addr = src;
2629 tr_req[0].icnt0 = tr0_cnt0;
2630 tr_req[0].icnt1 = tr0_cnt1;
2631 tr_req[0].icnt2 = 1;
2632 tr_req[0].icnt3 = 1;
2633 tr_req[0].dim1 = tr0_cnt0;
2634
2635 tr_req[0].daddr = dest;
2636 tr_req[0].dicnt0 = tr0_cnt0;
2637 tr_req[0].dicnt1 = tr0_cnt1;
2638 tr_req[0].dicnt2 = 1;
2639 tr_req[0].dicnt3 = 1;
2640 tr_req[0].ddim1 = tr0_cnt0;
2641
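	/*
	 * If the length needed two TRs, the second one moves the remaining
	 * len % tr0_cnt0 bytes that did not fit into the first TR's
	 * tr0_cnt0 x tr0_cnt1 block.
	 */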
2642 if (num_tr == 2) {
2643 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2644 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2645 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2646
2647 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2648 tr_req[1].icnt0 = tr1_cnt0;
2649 tr_req[1].icnt1 = 1;
2650 tr_req[1].icnt2 = 1;
2651 tr_req[1].icnt3 = 1;
2652
2653 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2654 tr_req[1].dicnt0 = tr1_cnt0;
2655 tr_req[1].dicnt1 = 1;
2656 tr_req[1].dicnt2 = 1;
2657 tr_req[1].dicnt3 = 1;
2658 }
2659
Peter Ujfalusibe4054b2020-05-12 16:45:31 +03002660 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
2661 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002662
2663 if (uc->config.metadata_size)
2664 d->vd.tx.metadata_ops = &metadata_ops;
2665
2666 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2667}
2668
2669static void udma_issue_pending(struct dma_chan *chan)
2670{
2671 struct udma_chan *uc = to_udma_chan(chan);
2672 unsigned long flags;
2673
2674 spin_lock_irqsave(&uc->vc.lock, flags);
2675
2676 /* If we have something pending and no active descriptor, then */
2677 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2678 /*
2679 * start a descriptor if the channel is NOT [marked as
2680 * terminating _and_ it is still running (teardown has not
2681 * completed yet)].
2682 */
2683 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2684 udma_is_chan_running(uc)))
2685 udma_start(uc);
2686 }
2687
2688 spin_unlock_irqrestore(&uc->vc.lock, flags);
2689}
2690
2691static enum dma_status udma_tx_status(struct dma_chan *chan,
2692 dma_cookie_t cookie,
2693 struct dma_tx_state *txstate)
2694{
2695 struct udma_chan *uc = to_udma_chan(chan);
2696 enum dma_status ret;
2697 unsigned long flags;
2698
2699 spin_lock_irqsave(&uc->vc.lock, flags);
2700
2701 ret = dma_cookie_status(chan, cookie, txstate);
2702
Peter Ujfalusi83903182020-02-14 11:14:41 +02002703 if (!udma_is_chan_running(uc))
2704 ret = DMA_COMPLETE;
2705
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002706 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2707 ret = DMA_PAUSED;
2708
2709 if (ret == DMA_COMPLETE || !txstate)
2710 goto out;
2711
2712 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2713 u32 peer_bcnt = 0;
2714 u32 bcnt = 0;
2715 u32 residue = uc->desc->residue;
2716 u32 delay = 0;
2717
2718 if (uc->desc->dir == DMA_MEM_TO_DEV) {
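		/*
		 * bcnt reflects the bytes processed on the UDMA side,
		 * peer_bcnt the bytes seen by the remote (PDMA) peer; their
		 * difference is reported as in-flight bytes.
		 */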
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002719 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002720
2721 if (uc->config.ep_type != PSIL_EP_NATIVE) {
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002722 peer_bcnt = udma_tchanrt_read(uc,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002723 UDMA_CHAN_RT_PEER_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002724
2725 if (bcnt > peer_bcnt)
2726 delay = bcnt - peer_bcnt;
2727 }
2728 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002729 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002730
2731 if (uc->config.ep_type != PSIL_EP_NATIVE) {
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002732 peer_bcnt = udma_rchanrt_read(uc,
Peter Ujfalusibc7e5522020-07-07 13:23:50 +03002733 UDMA_CHAN_RT_PEER_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002734
2735 if (peer_bcnt > bcnt)
2736 delay = peer_bcnt - bcnt;
2737 }
2738 } else {
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002739 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002740 }
2741
2742 bcnt -= uc->bcnt;
2743 if (bcnt && !(bcnt % uc->desc->residue))
2744 residue = 0;
2745 else
2746 residue -= bcnt % uc->desc->residue;
2747
2748 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2749 ret = DMA_COMPLETE;
2750 delay = 0;
2751 }
2752
2753 dma_set_residue(txstate, residue);
2754 dma_set_in_flight_bytes(txstate, delay);
2755
2756 } else {
2757 ret = DMA_COMPLETE;
2758 }
2759
2760out:
2761 spin_unlock_irqrestore(&uc->vc.lock, flags);
2762 return ret;
2763}
2764
2765static int udma_pause(struct dma_chan *chan)
2766{
2767 struct udma_chan *uc = to_udma_chan(chan);
2768
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002769 /* pause the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002770 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002771 case DMA_DEV_TO_MEM:
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002772 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002773 UDMA_PEER_RT_EN_PAUSE,
2774 UDMA_PEER_RT_EN_PAUSE);
2775 break;
2776 case DMA_MEM_TO_DEV:
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002777 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002778 UDMA_PEER_RT_EN_PAUSE,
2779 UDMA_PEER_RT_EN_PAUSE);
2780 break;
2781 case DMA_MEM_TO_MEM:
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002782 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002783 UDMA_CHAN_RT_CTL_PAUSE,
2784 UDMA_CHAN_RT_CTL_PAUSE);
2785 break;
2786 default:
2787 return -EINVAL;
2788 }
2789
2790 return 0;
2791}
2792
2793static int udma_resume(struct dma_chan *chan)
2794{
2795 struct udma_chan *uc = to_udma_chan(chan);
2796
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002797 /* resume the channel */
Peter Ujfalusic7450bb2020-02-14 11:14:40 +02002798 switch (uc->config.dir) {
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002799 case DMA_DEV_TO_MEM:
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002800 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002801 UDMA_PEER_RT_EN_PAUSE, 0);
2802
2803 break;
2804 case DMA_MEM_TO_DEV:
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002805 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002806 UDMA_PEER_RT_EN_PAUSE, 0);
2807 break;
2808 case DMA_MEM_TO_MEM:
Peter Ujfalusidb375dc2020-07-07 13:23:52 +03002809 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002810 UDMA_CHAN_RT_CTL_PAUSE, 0);
2811 break;
2812 default:
2813 return -EINVAL;
2814 }
2815
2816 return 0;
2817}
2818
2819static int udma_terminate_all(struct dma_chan *chan)
2820{
2821 struct udma_chan *uc = to_udma_chan(chan);
2822 unsigned long flags;
2823 LIST_HEAD(head);
2824
2825 spin_lock_irqsave(&uc->vc.lock, flags);
2826
2827 if (udma_is_chan_running(uc))
2828 udma_stop(uc);
2829
2830 if (uc->desc) {
2831 uc->terminated_desc = uc->desc;
2832 uc->desc = NULL;
2833 uc->terminated_desc->terminated = true;
2834 cancel_delayed_work(&uc->tx_drain.work);
2835 }
2836
2837 uc->paused = false;
2838
2839 vchan_get_all_descriptors(&uc->vc, &head);
2840 spin_unlock_irqrestore(&uc->vc.lock, flags);
2841 vchan_dma_desc_free_list(&uc->vc, &head);
2842
2843 return 0;
2844}
2845
2846static void udma_synchronize(struct dma_chan *chan)
2847{
2848 struct udma_chan *uc = to_udma_chan(chan);
2849 unsigned long timeout = msecs_to_jiffies(1000);
2850
2851 vchan_synchronize(&uc->vc);
2852
2853 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2854 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2855 timeout);
2856 if (!timeout) {
2857 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2858 uc->id);
2859 udma_dump_chan_stdata(uc);
2860 udma_reset_chan(uc, true);
2861 }
2862 }
2863
2864 udma_reset_chan(uc, false);
2865 if (udma_is_chan_running(uc))
2866 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2867
2868 cancel_delayed_work_sync(&uc->tx_drain.work);
2869 udma_reset_rings(uc);
2870}
2871
2872static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2873 struct virt_dma_desc *vd,
2874 struct dmaengine_result *result)
2875{
2876 struct udma_chan *uc = to_udma_chan(&vc->chan);
2877 struct udma_desc *d;
2878
2879 if (!vd)
2880 return;
2881
2882 d = to_udma_desc(&vd->tx);
2883
2884 if (d->metadata_size)
2885 udma_fetch_epib(uc, d);
2886
2887 /* Provide residue information for the client */
2888 if (result) {
2889 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2890
2891 if (cppi5_desc_get_type(desc_vaddr) ==
2892 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2893 result->residue = d->residue -
2894 cppi5_hdesc_get_pktlen(desc_vaddr);
2895 if (result->residue)
2896 result->result = DMA_TRANS_ABORTED;
2897 else
2898 result->result = DMA_TRANS_NOERROR;
2899 } else {
2900 result->residue = 0;
2901 result->result = DMA_TRANS_NOERROR;
2902 }
2903 }
2904}
2905
2906/*
2907 * This tasklet handles the completion of a DMA descriptor by
2908 * calling its callback and freeing it.
2909 */
Allen Pais2fa9bc92020-08-31 16:05:42 +05302910static void udma_vchan_complete(struct tasklet_struct *t)
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002911{
Allen Pais2fa9bc92020-08-31 16:05:42 +05302912 struct virt_dma_chan *vc = from_tasklet(vc, t, task);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002913 struct virt_dma_desc *vd, *_vd;
2914 struct dmaengine_desc_callback cb;
2915 LIST_HEAD(head);
2916
2917 spin_lock_irq(&vc->lock);
2918 list_splice_tail_init(&vc->desc_completed, &head);
2919 vd = vc->cyclic;
2920 if (vd) {
2921 vc->cyclic = NULL;
2922 dmaengine_desc_get_callback(&vd->tx, &cb);
2923 } else {
2924 memset(&cb, 0, sizeof(cb));
2925 }
2926 spin_unlock_irq(&vc->lock);
2927
2928 udma_desc_pre_callback(vc, vd, NULL);
2929 dmaengine_desc_callback_invoke(&cb, NULL);
2930
2931 list_for_each_entry_safe(vd, _vd, &head, node) {
2932 struct dmaengine_result result;
2933
2934 dmaengine_desc_get_callback(&vd->tx, &cb);
2935
2936 list_del(&vd->node);
2937
2938 udma_desc_pre_callback(vc, vd, &result);
2939 dmaengine_desc_callback_invoke(&cb, &result);
2940
2941 vchan_vdesc_fini(vd);
2942 }
2943}
2944
2945static void udma_free_chan_resources(struct dma_chan *chan)
2946{
2947 struct udma_chan *uc = to_udma_chan(chan);
2948 struct udma_dev *ud = to_udma_dev(chan->device);
2949
2950 udma_terminate_all(chan);
2951 if (uc->terminated_desc) {
2952 udma_reset_chan(uc, false);
2953 udma_reset_rings(uc);
2954 }
2955
2956 cancel_delayed_work_sync(&uc->tx_drain.work);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002957
2958 if (uc->irq_num_ring > 0) {
2959 free_irq(uc->irq_num_ring, uc);
2960
2961 uc->irq_num_ring = 0;
2962 }
2963 if (uc->irq_num_udma > 0) {
2964 free_irq(uc->irq_num_udma, uc);
2965
2966 uc->irq_num_udma = 0;
2967 }
2968
2969 /* Release PSI-L pairing */
2970 if (uc->psil_paired) {
2971 navss_psil_unpair(ud, uc->config.src_thread,
2972 uc->config.dst_thread);
2973 uc->psil_paired = false;
2974 }
2975
2976 vchan_free_chan_resources(&uc->vc);
2977 tasklet_kill(&uc->vc.task);
2978
2979 udma_free_tx_resources(uc);
2980 udma_free_rx_resources(uc);
2981 udma_reset_uchan(uc);
2982
2983 if (uc->use_dma_pool) {
2984 dma_pool_destroy(uc->hdesc_pool);
2985 uc->use_dma_pool = false;
2986 }
2987}
2988
2989static struct platform_driver udma_driver;
2990
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02002991struct udma_filter_param {
2992 int remote_thread_id;
2993 u32 atype;
2994};
2995
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02002996static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
2997{
2998 struct udma_chan_config *ucc;
2999 struct psil_endpoint_config *ep_config;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003000 struct udma_filter_param *filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003001 struct udma_chan *uc;
3002 struct udma_dev *ud;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003003
3004 if (chan->device->dev->driver != &udma_driver.driver)
3005 return false;
3006
3007 uc = to_udma_chan(chan);
3008 ucc = &uc->config;
3009 ud = uc->ud;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003010 filter_param = param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003011
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003012 if (filter_param->atype > 2) {
3013 dev_err(ud->dev, "Invalid channel atype: %u\n",
3014 filter_param->atype);
3015 return false;
3016 }
3017
3018 ucc->remote_thread_id = filter_param->remote_thread_id;
3019 ucc->atype = filter_param->atype;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003020
3021 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3022 ucc->dir = DMA_MEM_TO_DEV;
3023 else
3024 ucc->dir = DMA_DEV_TO_MEM;
3025
3026 ep_config = psil_get_ep_config(ucc->remote_thread_id);
3027 if (IS_ERR(ep_config)) {
3028 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3029 ucc->remote_thread_id);
3030 ucc->dir = DMA_MEM_TO_MEM;
3031 ucc->remote_thread_id = -1;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003032 ucc->atype = 0;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003033 return false;
3034 }
3035
3036 ucc->pkt_mode = ep_config->pkt_mode;
3037 ucc->channel_tpl = ep_config->channel_tpl;
3038 ucc->notdpkt = ep_config->notdpkt;
3039 ucc->ep_type = ep_config->ep_type;
3040
3041 if (ucc->ep_type != PSIL_EP_NATIVE) {
3042 const struct udma_match_data *match_data = ud->match_data;
3043
3044 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3045 ucc->enable_acc32 = ep_config->pdma_acc32;
3046 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3047 ucc->enable_burst = ep_config->pdma_burst;
3048 }
3049
3050 ucc->needs_epib = ep_config->needs_epib;
3051 ucc->psd_size = ep_config->psd_size;
3052 ucc->metadata_size =
3053 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3054 ucc->psd_size;
3055
3056 if (ucc->pkt_mode)
3057 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3058 ucc->metadata_size, ud->desc_align);
3059
3060 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3061 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3062
3063 return true;
3064}
3065
3066static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3067 struct of_dma *ofdma)
3068{
3069 struct udma_dev *ud = ofdma->of_dma_data;
3070 dma_cap_mask_t mask = ud->ddev.cap_mask;
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003071 struct udma_filter_param filter_param;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003072 struct dma_chan *chan;
3073
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003074 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003075 return NULL;
3076
Peter Ujfalusi0ebcf1a2020-02-18 16:31:26 +02003077 filter_param.remote_thread_id = dma_spec->args[0];
3078 if (dma_spec->args_count == 2)
3079 filter_param.atype = dma_spec->args[1];
3080 else
3081 filter_param.atype = 0;
3082
3083 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3084 ofdma->of_node);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003085 if (!chan) {
3086 dev_err(ud->dev, "get channel fail in %s.\n", __func__);
3087 return ERR_PTR(-EINVAL);
3088 }
3089
3090 return chan;
3091}
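
/*
 * Device tree usage sketch (illustrative only; the label and PSI-L thread
 * IDs are hypothetical): a client references the DMA with the remote thread
 * ID as the first cell and, optionally, the atype as the second cell:
 *
 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *	dma-names = "tx", "rx";
 */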
3092
3093static struct udma_match_data am654_main_data = {
3094 .psil_base = 0x1000,
3095 .enable_memcpy_support = true,
3096 .statictr_z_mask = GENMASK(11, 0),
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003097};
3098
3099static struct udma_match_data am654_mcu_data = {
3100 .psil_base = 0x6000,
Peter Ujfalusia4e68852020-03-27 16:42:28 +02003101 .enable_memcpy_support = false,
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003102 .statictr_z_mask = GENMASK(11, 0),
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003103};
3104
3105static struct udma_match_data j721e_main_data = {
3106 .psil_base = 0x1000,
3107 .enable_memcpy_support = true,
3108 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3109 .statictr_z_mask = GENMASK(23, 0),
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003110};
3111
3112static struct udma_match_data j721e_mcu_data = {
3113 .psil_base = 0x6000,
3114 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3115 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3116 .statictr_z_mask = GENMASK(23, 0),
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003117};
3118
3119static const struct of_device_id udma_of_match[] = {
3120 {
3121 .compatible = "ti,am654-navss-main-udmap",
3122 .data = &am654_main_data,
3123 },
3124 {
3125 .compatible = "ti,am654-navss-mcu-udmap",
3126 .data = &am654_mcu_data,
3127 }, {
3128 .compatible = "ti,j721e-navss-main-udmap",
3129 .data = &j721e_main_data,
3130 }, {
3131 .compatible = "ti,j721e-navss-mcu-udmap",
3132 .data = &j721e_mcu_data,
3133 },
3134 { /* Sentinel */ },
3135};
3136
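/*
 * rchan_oes_offset is the SoC-specific offset added to an rchan index to
 * form the event index used when requesting the UDMA (TR) event interrupt
 * for that channel; see udma_alloc_chan_resources() and
 * udma_setup_resources().
 */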
Peter Ujfalusif9b0366f52020-09-10 15:43:29 +03003137static struct udma_soc_data am654_soc_data = {
3138 .rchan_oes_offset = 0x200,
3139};
3140
3141static struct udma_soc_data j721e_soc_data = {
3142 .rchan_oes_offset = 0x400,
3143};
3144
3145static struct udma_soc_data j7200_soc_data = {
3146 .rchan_oes_offset = 0x80,
3147};
3148
3149static const struct soc_device_attribute k3_soc_devices[] = {
3150 { .family = "AM65X", .data = &am654_soc_data },
3151 { .family = "J721E", .data = &j721e_soc_data },
3152 { .family = "J7200", .data = &j7200_soc_data },
3153 { /* sentinel */ }
3154};
3155
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003156static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3157{
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003158 int i;
3159
3160 for (i = 0; i < MMR_LAST; i++) {
Zhang Qilongea275002020-09-21 17:37:01 +08003161 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003162 if (IS_ERR(ud->mmrs[i]))
3163 return PTR_ERR(ud->mmrs[i]);
3164 }
3165
3166 return 0;
3167}
3168
3169static int udma_setup_resources(struct udma_dev *ud)
3170{
3171 struct device *dev = ud->dev;
3172 int ch_count, ret, i, j;
3173 u32 cap2, cap3;
3174 struct ti_sci_resource_desc *rm_desc;
3175 struct ti_sci_resource *rm_res, irq_res;
3176 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3177 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3178 "ti,sci-rm-range-rchan",
3179 "ti,sci-rm-range-rflow" };
3180
Peter Ujfalusi44385c42020-07-17 15:09:02 +03003181 cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
3182 cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003183
Peter Ujfalusi44385c42020-07-17 15:09:02 +03003184 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
3185 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
3186 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
3187 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003188 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3189
Peter Ujfalusidaf4ad02020-07-17 15:09:03 +03003190 /* Set up the throughput level start indexes */
3191 if (of_device_is_compatible(dev->of_node,
3192 "ti,am654-navss-main-udmap")) {
3193 ud->tpl_levels = 2;
3194 ud->tpl_start_idx[0] = 8;
3195 } else if (of_device_is_compatible(dev->of_node,
3196 "ti,am654-navss-mcu-udmap")) {
3197 ud->tpl_levels = 2;
3198 ud->tpl_start_idx[0] = 2;
3199 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
3200 ud->tpl_levels = 3;
3201 ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
Peter Ujfalusie2de9252020-12-08 11:04:21 +02003202 ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
Peter Ujfalusidaf4ad02020-07-17 15:09:03 +03003203 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
3204 ud->tpl_levels = 2;
3205 ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
3206 } else {
3207 ud->tpl_levels = 1;
3208 }
3209
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003210 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3211 sizeof(unsigned long), GFP_KERNEL);
3212 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3213 GFP_KERNEL);
3214 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3215 sizeof(unsigned long), GFP_KERNEL);
3216 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3217 GFP_KERNEL);
3218 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3219 sizeof(unsigned long),
3220 GFP_KERNEL);
3221 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3222 BITS_TO_LONGS(ud->rflow_cnt),
3223 sizeof(unsigned long),
3224 GFP_KERNEL);
3225 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3226 sizeof(unsigned long),
3227 GFP_KERNEL);
3228 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3229 GFP_KERNEL);
3230
3231 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3232 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3233 !ud->rflows || !ud->rflow_in_use)
3234 return -ENOMEM;
3235
3236 /*
3237 * RX flows with the same Ids as RX channels are reserved to be used
3238 * as default flows if remote HW can't generate flow_ids. Those
3239 * RX flows can be requested only explicitly by id.
3240 */
3241 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3242
3243 /* by default no GP rflows are assigned to Linux */
3244 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3245
3246 /* Get resource ranges from tisci */
3247 for (i = 0; i < RM_RANGE_LAST; i++)
3248 tisci_rm->rm_ranges[i] =
3249 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3250 tisci_rm->tisci_dev_id,
3251 (char *)range_names[i]);
3252
3253 /* tchan ranges */
3254 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3255 if (IS_ERR(rm_res)) {
3256 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3257 } else {
3258 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3259 for (i = 0; i < rm_res->sets; i++) {
3260 rm_desc = &rm_res->desc[i];
3261 bitmap_clear(ud->tchan_map, rm_desc->start,
3262 rm_desc->num);
3263 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3264 rm_desc->start, rm_desc->num);
3265 }
3266 }
3267 irq_res.sets = rm_res->sets;
3268
3269 /* rchan and matching default flow ranges */
3270 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3271 if (IS_ERR(rm_res)) {
3272 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3273 } else {
3274 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3275 for (i = 0; i < rm_res->sets; i++) {
3276 rm_desc = &rm_res->desc[i];
3277 bitmap_clear(ud->rchan_map, rm_desc->start,
3278 rm_desc->num);
3279 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3280 rm_desc->start, rm_desc->num);
3281 }
3282 }
3283
3284 irq_res.sets += rm_res->sets;
3285 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3286 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3287 for (i = 0; i < rm_res->sets; i++) {
3288 irq_res.desc[i].start = rm_res->desc[i].start;
3289 irq_res.desc[i].num = rm_res->desc[i].num;
3290 }
3291 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3292 for (j = 0; j < rm_res->sets; j++, i++) {
3293 irq_res.desc[i].start = rm_res->desc[j].start +
Peter Ujfalusif9b0366f52020-09-10 15:43:29 +03003294 ud->soc_data->rchan_oes_offset;
Peter Ujfalusi25dcb5d2019-12-23 13:04:50 +02003295 irq_res.desc[i].num = rm_res->desc[j].num;
3296 }
3297 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3298 kfree(irq_res.desc);
3299 if (ret) {
3300 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3301 return ret;
3302 }
3303
3304 /* GP rflow ranges */
3305 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3306 if (IS_ERR(rm_res)) {
3307 /* all gp flows are assigned exclusively to Linux */
3308 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3309 ud->rflow_cnt - ud->rchan_cnt);
3310 } else {
3311 for (i = 0; i < rm_res->sets; i++) {
3312 rm_desc = &rm_res->desc[i];
3313 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3314 rm_desc->num);
3315 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3316 rm_desc->start, rm_desc->num);
3317 }
3318 }
3319
3320 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3321 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3322 if (!ch_count)
3323 return -ENODEV;
3324
3325 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3326 GFP_KERNEL);
3327 if (!ud->channels)
3328 return -ENOMEM;
3329
3330 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3331 ch_count,
3332 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3333 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3334 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3335 ud->rflow_cnt));
3336
3337 return ch_count;
3338}
3339
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003340static int udma_setup_rx_flush(struct udma_dev *ud)
3341{
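	/*
	 * Two ready-made descriptors are prepared below: hwdescs[0] for TR
	 * mode and hwdescs[1] for packet mode channels. Both point at the
	 * same dummy buffer used to discard data on RX channel teardown.
	 */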
3342 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3343 struct cppi5_desc_hdr_t *tr_desc;
3344 struct cppi5_tr_type1_t *tr_req;
3345 struct cppi5_host_desc_t *desc;
3346 struct device *dev = ud->dev;
3347 struct udma_hwdesc *hwdesc;
3348 size_t tr_size;
3349
3350 /* Allocate 1K buffer for discarded data on RX channel teardown */
3351 rx_flush->buffer_size = SZ_1K;
3352 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3353 GFP_KERNEL);
3354 if (!rx_flush->buffer_vaddr)
3355 return -ENOMEM;
3356
3357 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3358 rx_flush->buffer_size,
3359 DMA_TO_DEVICE);
3360 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3361 return -ENOMEM;
3362
3363 /* Set up descriptor to be used for TR mode */
3364 hwdesc = &rx_flush->hwdescs[0];
3365 tr_size = sizeof(struct cppi5_tr_type1_t);
3366 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3367 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3368 ud->desc_align);
3369
3370 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3371 GFP_KERNEL);
3372 if (!hwdesc->cppi5_desc_vaddr)
3373 return -ENOMEM;
3374
3375 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3376 hwdesc->cppi5_desc_size,
3377 DMA_TO_DEVICE);
3378 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3379 return -ENOMEM;
3380
3381 /* Start of the TR req records */
3382 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3383 /* Start address of the TR response array */
3384 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3385
3386 tr_desc = hwdesc->cppi5_desc_vaddr;
3387 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3388 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3389 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3390
3391 tr_req = hwdesc->tr_req_base;
3392 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3393 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3394 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3395
3396 tr_req->addr = rx_flush->buffer_paddr;
3397 tr_req->icnt0 = rx_flush->buffer_size;
3398 tr_req->icnt1 = 1;
3399
Peter Ujfalusi5bbeea32020-05-12 16:45:44 +03003400 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3401 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3402
Peter Ujfalusi16cd3c62020-02-14 11:14:37 +02003403 /* Set up descriptor to be used for packet mode */
3404 hwdesc = &rx_flush->hwdescs[1];
3405 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3406 CPPI5_INFO0_HDESC_EPIB_SIZE +
3407 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3408 ud->desc_align);
3409
3410 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3411 GFP_KERNEL);
3412 if (!hwdesc->cppi5_desc_vaddr)
3413 return -ENOMEM;
3414
3415 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3416 hwdesc->cppi5_desc_size,
3417 DMA_TO_DEVICE);
3418 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3419 return -ENOMEM;
3420
3421 desc = hwdesc->cppi5_desc_vaddr;
3422 cppi5_hdesc_init(desc, 0, 0);
3423 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3424 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3425
3426 cppi5_hdesc_attach_buf(desc,
3427 rx_flush->buffer_paddr, rx_flush->buffer_size,
3428 rx_flush->buffer_paddr, rx_flush->buffer_size);
3429
3430 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3431 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3432 return 0;
3433}
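/*
 * The two descriptors built above share the single 1K bounce buffer:
 * hwdescs[0] is a TR-mode (type1) descriptor, hwdescs[1] a host/packet-mode
 * descriptor. During RX channel teardown the one matching the channel's mode
 * is pushed to the flow's free descriptor ring so any in-flight data lands
 * in the dummy buffer and can be discarded.
 */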

#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}
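/*
 * Example dbg_summary_show line for one in-use channel (channel number and
 * PSI-L thread IDs are illustrative):
 *
 *  dma0chan0    | in-use (MEM_TO_DEV, tchan0 [0x1000 -> 0xc400], PSI-L Native, Packet mode)
 */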

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask (48 bits)\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
	if (!ret && ud->atype > 2) {
		dev_err(dev, "Invalid atype: %u\n", ud->atype);
		return -EINVAL;
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi_domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	soc = soc_device_match(k3_soc_devices);
	if (!soc) {
		dev_err(dev, "No compatible SoC found\n");
		return -ENODEV;
	}
	ud->soc_data = soc->data;

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);

	ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}
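	/*
	 * With memcpy support a MEM_TO_MEM transfer uses a tchan/rchan pair
	 * looped back over PSI-L, which is why the debugfs summary above
	 * prints such channels as "chan%d pair".
	 */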

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = udma_setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();
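	/*
	 * The alignment chosen above (at least 64 bytes, bumped to the CPU
	 * cache line size if that is larger) keeps each hardware descriptor
	 * in its own cache line(s), so cache maintenance on one descriptor
	 * cannot touch a neighbouring one.
	 */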

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}
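	/*
	 * Channels start out unbound: tchan/rchan stay NULL and the remote
	 * PSI-L thread is unknown (-1) until a client requests the channel
	 * and udma_alloc_chan_resources() claims the real resources.
	 */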

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe	= udma_probe,
};
builtin_platform_driver(udma_driver);
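/*
 * Illustrative consumer view (not part of this driver): a peripheral driver
 * obtains one of these channels through the generic dmaengine API and the
 * "dmas"/"dma-names" properties of its own DT node; udma_of_xlate() turns the
 * PSI-L thread ID in the dma specifier into the channel configuration.
 * A minimal, hedged sketch:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */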

/* Private interfaces to UDMA */
#include "k3-udma-private.c"