// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32 hdesc_size;
	bool epib;
	u32 psdata_size;
	u32 swdata_size;
	u32 atype;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 swdata_size;
	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
						       "ti,ringacc");
	if (IS_ERR(common->ringacc))
		return PTR_ERR(common->ringacc);

	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

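/*
 * Resolve the "dma-names"/"dmas" entry @name on @chn_np, validate that the
 * PSI-L thread direction matches @tx_chn, read the optional atype cell,
 * fetch the PSI-L endpoint configuration and fill in the common part of
 * the glue channel.
 */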
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
		bool tx_chn)
{
	struct psil_endpoint_config *ep_config;
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		common->atype = dma_spec.args[1];
	}

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(ep_config);
		goto out_put_spec;
	}

	common->epib = ep_config->needs_epib;
	common->psdata_size = ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

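/*
 * k3_udma_glue_request_tx_chn - allocate and set up a TX channel
 *
 * Parses the client's "dmas"/"dma-names" properties, requests a UDMAP TX
 * channel and the TX/TXCQ ring pair, configures the channel through TISCI
 * and pairs the PSI-L threads. The channel is left disabled; use
 * k3_udma_glue_enable_tx_chn() to start it.
 */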
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tchan_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	tx_chn->psil_paired = true;

	/* reset TX RT registers */
	k3_udma_glue_disable_tx_chn(tx_chn);

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

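/*
 * Queue a prepared host descriptor on the TX ring. The free descriptor
 * budget (sized from the TXCQ ring) is decremented here and given back in
 * k3_udma_glue_pop_tx_chn(); -ENOMEM is returned when no slot is left.
 * The descriptor return policy is pointed at the TXCQ ring before the push.
 */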
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	u32 txrt_ctl;

	txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    txrt_ctl);

	txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
				      UDMA_CHAN_RT_CTL_REG);
	txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    txrt_ctl);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

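/*
 * Initiate channel teardown. When @sync is true, poll the RT control
 * register until the channel enable bit clears or
 * K3_UDMAX_TDOWN_TIMEOUT_US is exceeded.
 */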
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

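/*
 * Drain both TX rings after a teardown: the TXCQ ring is simply reset,
 * while each descriptor still sitting in the TX ring is popped and handed
 * to the caller's @cleanup callback before the ring state is reset.
 */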
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/* reset TXCQ as it is not input for udma - expected to be empty */
	if (tx_chn->ringtxcq)
		k3_ringacc_ring_reset(tx_chn->ringtxcq);

	/*
	 * TXQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save TXQ occ
	 * 2) clean up TXQ and call callback .cleanup() for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

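/*
 * Configure the UDMAP RX channel through TISCI: fetch size, channel type,
 * atype and, when a flow range was allocated for this channel, its flow
 * id start and count.
 */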
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

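/*
 * Set up one RX flow: reserve the rflow, request and configure the
 * RX/RXFDQ ring pair, then program the flow through TISCI so completed
 * descriptors land in ringrx and free descriptors are taken from
 * ringrxfdq. For remote channels the ring ids are reported as
 * TI_SCI_RESOURCE_NULL.
 */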
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    flow_cfg->ring_rxfdq0_id,
					    flow_cfg->ring_rxq_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}

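/*
 * Request a locally owned RX channel: parse the client DT entry, get a
 * UDMAP RX channel, allocate the flow range (GP rflows if needed),
 * configure the channel and optional default flow through TISCI and pair
 * the PSI-L threads. The channel is left disabled.
 */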
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;

	/* Use RX channel id as flow id: target dev can't generate flow_id */
	if (cfg->flow_id_use_rxchan_id)
		rx_chn->flow_id_base = rx_chn->udma_rchan_id;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	rx_chn->psil_paired = true;

	/* reset RX RT registers */
	k3_udma_glue_disable_rx_chn(rx_chn);

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under control of a remote CPU core, so
	 * Linux can only request and manipulate it through its dedicated
	 * RX flows.
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

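/*
 * k3_udma_glue_request_rx_chn - allocate and set up an RX channel
 *
 * Dispatches to the remote or local variant based on @cfg->remote.
 */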
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

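/*
 * Point a flow of a remote RX channel at its RX/RXFDQ rings. Only valid
 * for remote channels; locally owned channels are started with
 * k3_udma_glue_enable_rx_chn() instead.
 */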
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	u32 rxrt_ctl;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
				      UDMA_CHAN_RT_CTL_REG);
	rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    rxrt_ctl);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

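/*
 * Drain the rings of one RX flow: the RX (completion) ring is reset
 * directly, while descriptors still queued on the RX FDQ ring are popped
 * and passed to the caller's @cleanup callback before the ring state is
 * reset. @skip_fdq allows skipping the FDQ when it is shared by several
 * flows.
 */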
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
	if (flow->ringrx)
		k3_ringacc_ring_reset(flow->ringrx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		return;

	/*
	 * RX FDQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save RX FDQ occ
	 * 2) clean up RX FDQ and call callback .cleanup() for each desc
	 * 3) reset RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);