// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

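/*
 * EXPORT_SYMBOL wrappers around the UDMA driver's private helpers. These are
 * consumed by the K3 UDMA glue layer (k3-udma-glue), which drives channels
 * directly rather than through the DMA engine API.
 */

/* Pair/unpair PSI-L source and destination threads via NAVSS */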
int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	return navss_psil_pair(ud, src_thread, dst_thread);
}
EXPORT_SYMBOL(xudma_navss_psil_pair);

int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	return navss_psil_unpair(ud, src_thread, dst_thread);
}
EXPORT_SYMBOL(xudma_navss_psil_unpair);

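/*
 * Look up the UDMA platform device referenced by the @property phandle of
 * @np (or @np itself when @property is NULL) and return its driver data.
 * Returns ERR_PTR(-EPROBE_DEFER) while the UDMA driver has not probed yet.
 */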
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
{
	struct device_node *udma_node = np;
	struct platform_device *pdev;
	struct udma_dev *ud;

	if (property) {
		udma_node = of_parse_phandle(np, property, 0);
		if (!udma_node) {
			pr_err("UDMA node is not found\n");
			return ERR_PTR(-ENODEV);
		}
	}

	pdev = of_find_device_by_node(udma_node);
	if (!pdev) {
		pr_debug("UDMA device not found\n");
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (np != udma_node)
		of_node_put(udma_node);

	ud = platform_get_drvdata(pdev);
	if (!ud) {
		pr_debug("UDMA has not been probed\n");
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	return ud;
}
EXPORT_SYMBOL(of_xudma_dev_get);

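/* Accessors for driver private data needed by the glue layer */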
struct device *xudma_get_device(struct udma_dev *ud)
{
	return ud->dev;
}
EXPORT_SYMBOL(xudma_get_device);

struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud)
{
	return ud->ringacc;
}
EXPORT_SYMBOL(xudma_get_ringacc);

u32 xudma_dev_get_psil_base(struct udma_dev *ud)
{
	return ud->psil_base;
}
EXPORT_SYMBOL(xudma_dev_get_psil_base);

struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
{
	return &ud->tisci_rm;
}
EXPORT_SYMBOL(xudma_dev_get_tisci_rm);

int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	return __udma_alloc_gp_rflow_range(ud, from, cnt);
}
EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);

int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	return __udma_free_gp_rflow_range(ud, from, cnt);
}
EXPORT_SYMBOL(xudma_free_gp_rflow_range);

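/* A flow is general purpose if its bit is clear in ud->rflow_gp_map */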
bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
{
	if (!ud->rflow_gp_map)
		return false;

	return !test_bit(id, ud->rflow_gp_map);
}
EXPORT_SYMBOL(xudma_rflow_is_gp);

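/*
 * Generate xudma_{t,r}chan_get()/_put(): _get() reserves a channel at normal
 * throughput level via the driver's bitmap allocator, _put() clears the
 * reservation bit again.
 */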
#define XUDMA_GET_PUT_RESOURCE(res)					\
struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id)	\
{									\
	return __udma_reserve_##res(ud, UDMA_TP_NORMAL, id);		\
}									\
EXPORT_SYMBOL(xudma_##res##_get);					\
									\
void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p)	\
{									\
	clear_bit(p->id, ud->res##_map);				\
}									\
EXPORT_SYMBOL(xudma_##res##_put)
XUDMA_GET_PUT_RESOURCE(tchan);
XUDMA_GET_PUT_RESOURCE(rchan);

struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
{
	return __udma_get_rflow(ud, id);
}
EXPORT_SYMBOL(xudma_rflow_get);

void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
{
	__udma_put_rflow(ud, p);
}
EXPORT_SYMBOL(xudma_rflow_put);

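/* On PKTDMA the rflow rings are placed after the tflow rings */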
int xudma_get_rflow_ring_offset(struct udma_dev *ud)
{
	return ud->tflow_cnt;
}
EXPORT_SYMBOL(xudma_get_rflow_ring_offset);

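/* Generate trivial xudma_*_get_id() accessors for tchan, rchan and rflow */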
#define XUDMA_GET_RESOURCE_ID(res)					\
int xudma_##res##_get_id(struct udma_##res *p)				\
{									\
	return p->id;							\
}									\
EXPORT_SYMBOL(xudma_##res##_get_id)
XUDMA_GET_RESOURCE_ID(tchan);
XUDMA_GET_RESOURCE_ID(rchan);
XUDMA_GET_RESOURCE_ID(rflow);

/* Exported register access functions */
#define XUDMA_RT_IO_FUNCTIONS(res)					\
u32 xudma_##res##rt_read(struct udma_##res *p, int reg)		\
{									\
	if (!p)								\
		return 0;						\
	return udma_read(p->reg_rt, reg);				\
}									\
EXPORT_SYMBOL(xudma_##res##rt_read);					\
									\
void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val)	\
{									\
	if (!p)								\
		return;							\
	udma_write(p->reg_rt, reg, val);				\
}									\
EXPORT_SYMBOL(xudma_##res##rt_write)
XUDMA_RT_IO_FUNCTIONS(tchan);
XUDMA_RT_IO_FUNCTIONS(rchan);

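/*
 * PKTDMA specific helpers: the *_get_irq() lookups add the SoC specific
 * output event offset to the flow id and ask the TISCI INTA MSI domain
 * for the corresponding Linux virq.
 */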
int xudma_is_pktdma(struct udma_dev *ud)
{
	return ud->match_data->type == DMA_TYPE_PKTDMA;
}
EXPORT_SYMBOL(xudma_is_pktdma);

int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
{
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;

	return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
					oes->pktdma_tchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);

int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
{
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;

	return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
					oes->pktdma_rchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);