blob: 119164abcb41ac339a93ab98ecceb3ae4fefb004 [file] [log] [blame]
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * TI K3 NAVSS Ring Accelerator subsystem driver
4 *
5 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
6 */
7
8#include <linux/dma-mapping.h>
9#include <linux/io.h>
10#include <linux/init.h>
11#include <linux/of.h>
12#include <linux/platform_device.h>
Grygorii Strashko95e7be02020-09-11 21:29:56 -070013#include <linux/sys_soc.h>
Grygorii Strashko3277e8a2020-01-15 10:07:27 -080014#include <linux/soc/ti/k3-ringacc.h>
15#include <linux/soc/ti/ti_sci_protocol.h>
16#include <linux/soc/ti/ti_sci_inta_msi.h>
17#include <linux/of_irq.h>
18#include <linux/irqdomain.h>
19
20static LIST_HEAD(k3_ringacc_list);
21static DEFINE_MUTEX(k3_ringacc_list_lock);
22
23#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
24
/**
 * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
 *
 * @resv_16: Reserved
 * @db: Ring Doorbell Register
 * @resv_4: Reserved
 * @occ: Ring Occupancy Register
 * @indx: Ring Current Index Register
 * @hwocc: Ring Hardware Occupancy Register
 * @hwindx: Ring Hardware Current Index Register
 *
 * Layout mirrors the hardware register map - do not reorder or repack.
 */
struct k3_ring_rt_regs {
	u32	resv_16[4];
	u32	db;
	u32	resv_4[1];
	u32	occ;
	u32	indx;
	u32	hwocc;
	u32	hwindx;
};

/* Stride between per-ring RT register windows */
#define K3_RINGACC_RT_REGS_STEP	0x1000
47
/**
 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
 *
 * @head_data: Ring Head Entry Data Registers
 * @tail_data: Ring Tail Entry Data Registers
 * @peek_head_data: Ring Peek Head Entry Data Regs
 * @peek_tail_data: Ring Peek Tail Entry Data Regs
 *
 * Each window is 128 x u32 = 512 bytes
 * (K3_RINGACC_FIFO_WINDOW_SIZE_BYTES); elements are accessed at the
 * end of a window, see k3_ringacc_ring_get_fifo_pos().
 */
struct k3_ring_fifo_regs {
	u32	head_data[128];
	u32	tail_data[128];
	u32	peek_head_data[128];
	u32	peek_tail_data[128];
};
62
/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 *
 * @revision: Revision Register
 * @config: Config Register (number of proxy threads in bits 15:0)
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;
	u32	config;
};

/* Extracts the proxy thread count from @config */
#define K3_RINGACC_PROXY_CFG_THREADS_MASK		GENMASK(15, 0)
75
/**
 * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
 *
 * @control: Proxy Control Register
 * @status: Proxy Status Register
 * @resv_512: Reserved
 * @data: Proxy Data Register
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;
	u32	status;
	u8	resv_512[504];
	u32	data[128];
};

/* Stride between per-proxy target register windows */
#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
/* Sentinel for rings which have no proxy assigned */
#define K3_RINGACC_PROXY_NOT_USED	(-1)

/* Access mode programmed into the proxy control register */
enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};
100
#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES  (512U)
#define K3_RINGACC_FIFO_REGS_STEP	0x1000
/* Max count which can be written to the doorbell in one go */
#define K3_RINGACC_MAX_DB_RING_CNT    (127U)

/**
 * struct k3_ring_ops - Per-mode ring element access operations
 *
 * @push_tail: push one element to the ring tail (normal enqueue)
 * @push_head: push one element to the ring head
 * @pop_tail: pop one element from the ring tail
 * @pop_head: pop one element from the ring head (normal dequeue)
 *
 * Ops not supported by a given ring mode are left NULL.
 */
struct k3_ring_ops {
	int (*push_tail)(struct k3_ring *ring, void *elm);
	int (*push_head)(struct k3_ring *ring, void *elm);
	int (*pop_tail)(struct k3_ring *ring, void *elm);
	int (*pop_head)(struct k3_ring *ring, void *elm);
};
111
/**
 * struct k3_ring_state - Internal state tracking structure
 *
 * @free: Number of free entries
 * @occ: Occupancy
 * @windex: Write index
 * @rindex: Read index
 *
 * Software-side mirror of the ring occupancy/index state; lazily
 * refreshed from the RT registers (see k3_ringacc_ring_get_free() and
 * k3_ringacc_ring_pop()) and zeroed on reset/reconfiguration.
 */
struct k3_ring_state {
	u32 free;
	u32 occ;
	u32 windex;
	u32 rindex;
};
126
/**
 * struct k3_ring - RA Ring descriptor
 *
 * @rt: Ring control/status registers
 * @fifos: Ring queues registers
 * @proxy: Ring Proxy Datapath registers
 * @ring_mem_dma: Ring buffer dma address
 * @ring_mem_virt: Ring buffer virt address
 * @ops: Ring operations
 * @size: Ring size in elements
 * @elm_size: Size of the ring element
 * @mode: Ring mode
 * @flags: flags
 * @state: Ring state
 * @ring_id: Ring Id
 * @parent: Pointer on struct @k3_ringacc
 * @use_count: Use count for shared rings
 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
 * @dma_dev: device to be used for DMA API (allocation, mapping)
 */
struct k3_ring {
	struct k3_ring_rt_regs __iomem *rt;
	struct k3_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs  __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_ring_ops *ops;
	u32		size;
	enum k3_ring_size elm_size;
	enum k3_ring_mode mode;
	u32		flags;
/* Ring is configured (memory allocated, TI-SCI cfg done) */
#define K3_RING_FLAG_BUSY	BIT(1)
/* Ring may be requested by multiple users */
#define K3_RING_FLAG_SHARED	BIT(2)
	struct k3_ring_state state;
	u32		ring_id;
	struct k3_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
	struct device	*dma_dev;
};
167
Grygorii Strashko40a2a7c2020-07-24 14:19:43 -0700168struct k3_ringacc_ops {
169 int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
170};
171
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800172/**
173 * struct k3_ringacc - Rings accelerator descriptor
174 *
175 * @dev: pointer on RA device
176 * @proxy_gcfg: RA proxy global config registers
177 * @proxy_target_base: RA proxy datapath region
178 * @num_rings: number of ring in RA
179 * @rings_inuse: bitfield for ring usage tracking
180 * @rm_gp_range: general purpose rings range from tisci
181 * @dma_ring_reset_quirk: DMA reset w/a enable
182 * @num_proxies: number of RA proxies
183 * @proxy_inuse: bitfield for proxy usage tracking
184 * @rings: array of rings descriptors (struct @k3_ring)
185 * @list: list of RAs in the system
186 * @req_lock: protect rings allocation
187 * @tisci: pointer ti-sci handle
188 * @tisci_ring_ops: ti-sci rings ops
189 * @tisci_dev_id: ti-sci device id
Grygorii Strashko40a2a7c2020-07-24 14:19:43 -0700190 * @ops: SoC specific ringacc operation
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800191 */
192struct k3_ringacc {
193 struct device *dev;
194 struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
195 void __iomem *proxy_target_base;
196 u32 num_rings; /* number of rings in Ringacc module */
197 unsigned long *rings_inuse;
198 struct ti_sci_resource *rm_gp_range;
199
200 bool dma_ring_reset_quirk;
201 u32 num_proxies;
202 unsigned long *proxy_inuse;
203
204 struct k3_ring *rings;
205 struct list_head list;
206 struct mutex req_lock; /* protect rings allocation */
207
208 const struct ti_sci_handle *tisci;
209 const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
210 u32 tisci_dev_id;
Grygorii Strashko40a2a7c2020-07-24 14:19:43 -0700211
212 const struct k3_ringacc_ops *ops;
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800213};
214
Grygorii Strashko95e7be02020-09-11 21:29:56 -0700215/**
216 * struct k3_ringacc - Rings accelerator SoC data
217 *
218 * @dma_ring_reset_quirk: DMA reset w/a enable
219 */
220struct k3_ringacc_soc_data {
221 unsigned dma_ring_reset_quirk:1;
222};
223
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800224static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
225{
226 return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
227 (4 << ring->elm_size);
228}
229
230static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
231{
232 return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
233}
234
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);

/* RING mode: SW accesses the ring memory directly, head ops unsupported */
static struct k3_ring_ops k3_ring_mode_ring_ops = {
		.push_tail = k3_ringacc_ring_push_mem,
		.pop_head = k3_ringacc_ring_pop_mem,
};

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);

/* MESSAGE mode without proxy: elements go through the ring FIFO windows */
static struct k3_ring_ops k3_ring_mode_msg_ops = {
		.push_tail = k3_ringacc_ring_push_io,
		.push_head = k3_ringacc_ring_push_head_io,
		.pop_tail = k3_ringacc_ring_pop_tail_io,
		.pop_head = k3_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);

/* MESSAGE mode with proxy: elements go through the proxy datapath */
static struct k3_ring_ops k3_ring_mode_proxy_ops = {
		.push_tail = k3_ringacc_ring_push_tail_proxy,
		.push_head = k3_ringacc_ring_push_head_proxy,
		.pop_tail = k3_ringacc_ring_pop_tail_proxy,
		.pop_head = k3_ringacc_ring_pop_head_proxy,
};
266
/* Dump ring configuration, RT register state and the first 128 bytes
 * of ring memory to the debug log.
 */
static void k3_ringacc_ring_dump(struct k3_ring *ring)
{
	struct device *dev = ring->parent->dev;

	dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
	dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
		&ring->ring_mem_dma);
	dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
		ring->elm_size, ring->size, ring->mode, ring->proxy_id);
	dev_dbg(dev, "dump flags %08X\n", ring->flags);

	dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
	dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
	dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
	dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
	dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));

	if (ring->ring_mem_virt)
		print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
				     16, 1, ring->ring_mem_virt, 16 * 8, false);
}
288
/**
 * k3_ringacc_request_ring - request a ring from the Ring Accelerator
 * @ringacc: RA instance
 * @id: explicit ring id, or K3_RINGACC_RING_ID_ANY to take any free
 *      general purpose ring from the TI-SCI assigned range
 * @flags: K3_RINGACC_RING_USE_PROXY to also allocate a proxy thread
 *
 * Returns the ring descriptor, or NULL when no ring (or proxy) is
 * available. A ring already flagged K3_RING_FLAG_SHARED may be handed
 * out again; its use_count is bumped and proxy allocation is skipped.
 */
struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
					int id, u32 flags)
{
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	mutex_lock(&ringacc->req_lock);

	if (id == K3_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
						&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse, size,
					gp_rings->start);
		if (id == size)
			goto error;
	} else if (id < 0) {
		goto error;
	}

	/* Busy and not shared -> refuse; shared -> just add a user */
	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
		goto out;

	if (flags & K3_RINGACC_RING_USE_PROXY) {
		proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
					      ringacc->num_proxies, 0);
		if (proxy_id == ringacc->num_proxies)
			goto error;
	}

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
			proxy_id);
	} else {
		dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
	}

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	mutex_unlock(&ringacc->req_lock);
	return &ringacc->rings[id];

error:
	mutex_unlock(&ringacc->req_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
344
Grygorii Strashko43148b12020-07-24 14:19:12 -0700345int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
346 int fwd_id, int compl_id,
347 struct k3_ring **fwd_ring,
348 struct k3_ring **compl_ring)
349{
350 int ret = 0;
351
352 if (!fwd_ring || !compl_ring)
353 return -EINVAL;
354
355 *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
356 if (!(*fwd_ring))
357 return -ENODEV;
358
359 *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
360 if (!(*compl_ring)) {
361 k3_ringacc_ring_free(*fwd_ring);
362 ret = -ENODEV;
363 }
364
365 return ret;
366}
367EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
368
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800369static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
370{
Peter Ujfalusibb49ca02020-10-25 12:10:07 -0700371 struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800372 struct k3_ringacc *ringacc = ring->parent;
373 int ret;
374
Peter Ujfalusibb49ca02020-10-25 12:10:07 -0700375 ring_cfg.nav_id = ringacc->tisci_dev_id;
376 ring_cfg.index = ring->ring_id;
377 ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
378 ring_cfg.count = ring->size;
379
380 ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800381 if (ret)
382 dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
383 ret, ring->ring_id);
384}
385
/**
 * k3_ringacc_ring_reset - reset a configured ring
 * @ring: ring to reset; no-op if NULL or not yet configured (not BUSY)
 *
 * Clears the SW-tracked state and asks TI-SCI firmware to reset the
 * ring hardware state.
 */
void k3_ringacc_ring_reset(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	memset(&ring->state, 0, sizeof(ring->state));

	k3_ringacc_ring_reset_sci(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
396
/* Change only the ring's queue mode via TI-SCI (used by the DMA reset w/a) */
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
					       enum k3_ring_mode mode)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
	ring_cfg.mode = mode;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}
414
/**
 * k3_ringacc_ring_reset_dma - reset a ring used by UDMAP (with workaround)
 * @ring: ring to reset; no-op if NULL or not configured
 * @occ: current ring occupancy; read from HW if passed as 0
 *
 * On SoCs with the DMA ring reset quirk the UDMAP-internal occupancy
 * counter is not cleared by a plain ring reset, so it is wrapped back
 * to zero by ringing the doorbell in RING mode before the final reset.
 */
void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	if (!ring->parent->dma_ring_reset_quirk)
		goto reset;

	if (!occ)
		occ = readl(&ring->rt->occ);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
			ring->ring_id, occ);
		/* TI-SCI ring reset */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * Setup the ring in ring/doorbell mode (if not already in this
		 * mode)
		 */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_RINGACC_RING_MODE_RING);
		/*
		 * Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * of writes
			 */
			if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

reset:
	/* Reset the ring */
	k3_ringacc_ring_reset(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
473
/* Hand the ring configuration back to TI-SCI firmware (all params cleared) */
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}
489
/**
 * k3_ringacc_ring_free - release a previously requested ring
 * @ring: ring to free
 *
 * Drops one user reference. Only when the last user releases the ring
 * is its TI-SCI configuration cleared, its DMA memory freed and its
 * proxy (if any) returned to the pool.
 *
 * Returns 0 on success, -EINVAL if @ring is NULL or not marked in-use.
 */
int k3_ringacc_ring_free(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	mutex_lock(&ringacc->req_lock);

	/* Other users still hold the ring */
	if (--ring->use_count)
		goto out;

	/* Requested but never configured: nothing to tear down */
	if (!(ring->flags & K3_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags = 0;
	ring->ops = NULL;
	ring->dma_dev = NULL;
	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

out:
	mutex_unlock(&ringacc->req_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
534
535u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
536{
537 if (!ring)
538 return -EINVAL;
539
540 return ring->ring_id;
541}
542EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
543
544u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
545{
546 if (!ring)
547 return -EINVAL;
548
549 return ring->parent->tisci_dev_id;
550}
551EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
552
553int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
554{
555 int irq_num;
556
557 if (!ring)
558 return -EINVAL;
559
560 irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
561 if (irq_num <= 0)
562 irq_num = -EINVAL;
563 return irq_num;
564}
565EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
566
/*
 * Push the full ring configuration (base address, count, mode, element
 * size) to TI-SCI firmware. Returns 0 or a negative error code.
 */
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
	ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
	ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
	ring_cfg.count = ring->size;
	ring_cfg.mode = ring->mode;
	ring_cfg.size = ring->elm_size;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);

	return ret;
}
592
/**
 * k3_ringacc_ring_cfg - configure a requested ring
 * @ring: ring to configure (must be in-use)
 * @cfg: requested size, element size, mode, flags and optional dma_dev
 *
 * Allocates the ring's backing DMA memory, selects the per-mode element
 * access ops and pushes the configuration to TI-SCI. For shared rings
 * only the first (master) user performs the configuration; subsequent
 * users get 0 without touching the ring.
 *
 * Returns 0 on success, -EINVAL on invalid parameters/mode, -ENOMEM on
 * allocation failure, or the TI-SCI error code.
 */
int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc;
	int ret = 0;

	if (!ring || !cfg)
		return -EINVAL;
	ringacc = ring->parent;

	if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
	    cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	/* Large elements cannot go through the FIFO windows directly */
	if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
	    ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
	    cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
		dev_err(ringacc->dev,
			"Message mode must use proxy for %u element size\n",
			4 << ring->elm_size);
		return -EINVAL;
	}

	/*
	 * In case of shared ring only the first user (master user) can
	 * configure the ring. The sequence should be by the client:
	 * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
	 * k3_ringacc_ring_cfg(ring, cfg);	# master configuration
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 */
	if (ring->use_count != 1)
		return 0;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	memset(&ring->state, 0, sizeof(ring->state));

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	switch (ring->mode) {
	case K3_RINGACC_RING_MODE_RING:
		ring->ops = &k3_ring_mode_ring_ops;
		/* RING mode may use a client-provided device for DMA API */
		ring->dma_dev = cfg->dma_dev;
		if (!ring->dma_dev)
			ring->dma_dev = ringacc->dev;
		break;
	case K3_RINGACC_RING_MODE_MESSAGE:
		ring->dma_dev = ringacc->dev;
		if (ring->proxy)
			ring->ops = &k3_ring_mode_proxy_ops;
		else
			ring->ops = &k3_ring_mode_msg_ops;
		break;
	default:
		ring->ops = NULL;
		ret = -EINVAL;
		goto err_free_proxy;
	}

	ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
						 ring->size * (4 << ring->elm_size),
						 &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);

	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
			K3_RING_FLAG_SHARED : 0;

	k3_ringacc_ring_dump(ring);

	return 0;

err_free_mem:
	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	ring->dma_dev = NULL;
err_free_proxy:
	ring->proxy = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
692
693u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
694{
695 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
696 return -EINVAL;
697
698 return ring->size;
699}
700EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
701
/**
 * k3_ringacc_ring_get_free - get the number of free ring entries
 * @ring: ring descriptor
 *
 * Returns the cached free count, refreshing it from the hardware
 * occupancy register only when the cache has hit zero; or -EINVAL
 * (as u32) when @ring is NULL or not configured.
 */
u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->state.free)
		ring->state.free = ring->size - readl(&ring->rt->occ);

	return ring->state.free;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
713
714u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
715{
716 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
717 return -EINVAL;
718
719 return readl(&ring->rt->occ);
720}
721EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
722
723u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
724{
725 return !k3_ringacc_ring_get_free(ring);
726}
727EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
728
/* How an element access reaches the ring through the FIFO/proxy datapath */
enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};
737
/* Proxy control register fields: access mode (17:16), element size (26:24) */
#define K3_RINGACC_PROXY_MODE(x)	(((x) & 0x3) << 16)
#define K3_RINGACC_PROXY_ELSIZE(x)	(((x) & 0x7) << 24)
/*
 * Program the ring's proxy thread with the target ring id, access mode
 * and element size before touching the proxy data window.
 */
static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
{
	u32 val;

	val = ring->ring_id;
	val |= K3_RINGACC_PROXY_MODE(mode);
	val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
	writel(val, &ring->proxy->control);
	return 0;
}
751
/*
 * Transfer one element to/from the ring through the proxy data window.
 * Configures the proxy for head/tail access, then copies the element
 * at the end of the window and updates the SW-tracked free/occ counts.
 * Returns 0 or -EINVAL for an unsupported access mode.
 */
static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
					enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	/* Elements are accessed at the end of the 512B proxy data window */
	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
		ring->state.occ);
	return 0;
}
799
/* Thin per-op wrappers binding k3_ring_mode_proxy_ops to access modes */
static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
817
818static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
819{
820 return k3_ringacc_ring_access_proxy(ring, elem,
821 K3_RINGACC_ACCESS_MODE_POP_HEAD);
822}
823
/*
 * Transfer one element to/from the ring through the head/tail FIFO
 * windows (MESSAGE mode without proxy), updating the SW-tracked
 * free/occ counts. Returns 0 or -EINVAL for an unsupported mode.
 */
static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
				     enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	/* Elements are accessed at the end of the 512B FIFO window */
	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
		ring->state.free, ring->state.windex, ring->state.occ,
		ring->state.rindex);
	return 0;
}
870
/* Thin per-op wrappers binding k3_ring_mode_msg_ops to access modes */
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
888
889static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
890{
891 return k3_ringacc_ring_access_io(ring, elem,
892 K3_RINGACC_ACCESS_MODE_POP_HEAD);
893}
894
/*
 * RING mode push: copy the element into ring memory at the SW write
 * index, advance the index (wrapping at ring size) and ring the
 * doorbell with +1 to tell the hardware one element was added.
 */
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));

	ring->state.windex = (ring->state.windex + 1) % ring->size;
	ring->state.free--;
	writel(1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
		ring->state.free, ring->state.windex);

	return 0;
}
912
/*
 * RING mode pop: copy the element out of ring memory at the SW read
 * index, advance the index (wrapping at ring size) and ring the
 * doorbell with -1 to tell the hardware one element was consumed.
 */
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->state.rindex = (ring->state.rindex + 1) % ring->size;
	ring->state.occ--;
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
		ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}
929
/**
 * k3_ringacc_ring_push - push one element to the ring tail
 * @ring: ring descriptor
 * @elem: element to push (ring element size bytes are copied)
 *
 * Returns 0 on success, -EINVAL if the ring is NULL/unconfigured,
 * -ENOMEM when the ring is full, -EOPNOTSUPP when the ring mode has
 * no push_tail op.
 */
int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
		ring->state.free, ring->state.windex);

	if (k3_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_tail)
		ret = ring->ops->push_tail(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
949
950int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
951{
952 int ret = -EOPNOTSUPP;
953
954 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
955 return -EINVAL;
956
957 dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
Peter Ujfalusi6b3da0b2020-07-24 14:17:55 -0700958 ring->state.free, ring->state.windex);
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800959
960 if (k3_ringacc_ring_is_full(ring))
961 return -ENOMEM;
962
963 if (ring->ops && ring->ops->push_head)
964 ret = ring->ops->push_head(ring, elem);
965
966 return ret;
967}
968EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
969
970int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
971{
972 int ret = -EOPNOTSUPP;
973
974 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
975 return -EINVAL;
976
Peter Ujfalusi6b3da0b2020-07-24 14:17:55 -0700977 if (!ring->state.occ)
978 ring->state.occ = k3_ringacc_ring_get_occ(ring);
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800979
Peter Ujfalusi6b3da0b2020-07-24 14:17:55 -0700980 dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
981 ring->state.rindex);
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800982
Peter Ujfalusi6b3da0b2020-07-24 14:17:55 -0700983 if (!ring->state.occ)
Grygorii Strashko3277e8a2020-01-15 10:07:27 -0800984 return -ENODATA;
985
986 if (ring->ops && ring->ops->pop_head)
987 ret = ring->ops->pop_head(ring, elem);
988
989 return ret;
990}
991EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
992
993int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
994{
995 int ret = -EOPNOTSUPP;
996
997 if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
998 return -EINVAL;
999
Peter Ujfalusi6b3da0b2020-07-24 14:17:55 -07001000 if (!ring->state.occ)
1001 ring->state.occ = k3_ringacc_ring_get_occ(ring);
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001002
Peter Ujfalusi6b3da0b2020-07-24 14:17:55 -07001003 dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
1004 ring->state.occ, ring->state.rindex);
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001005
Peter Ujfalusi6b3da0b2020-07-24 14:17:55 -07001006 if (!ring->state.occ)
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001007 return -ENODATA;
1008
1009 if (ring->ops && ring->ops->pop_tail)
1010 ret = ring->ops->pop_tail(ring, elem);
1011
1012 return ret;
1013}
1014EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
1015
1016struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
1017 const char *property)
1018{
1019 struct device_node *ringacc_np;
1020 struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
1021 struct k3_ringacc *entry;
1022
1023 ringacc_np = of_parse_phandle(np, property, 0);
1024 if (!ringacc_np)
1025 return ERR_PTR(-ENODEV);
1026
1027 mutex_lock(&k3_ringacc_list_lock);
1028 list_for_each_entry(entry, &k3_ringacc_list, list)
1029 if (entry->dev->of_node == ringacc_np) {
1030 ringacc = entry;
1031 break;
1032 }
1033 mutex_unlock(&k3_ringacc_list_lock);
1034 of_node_put(ringacc_np);
1035
1036 return ringacc;
1037}
1038EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
1039
1040static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
1041{
1042 struct device_node *node = ringacc->dev->of_node;
1043 struct device *dev = ringacc->dev;
1044 struct platform_device *pdev = to_platform_device(dev);
1045 int ret;
1046
1047 if (!node) {
1048 dev_err(dev, "device tree info unavailable\n");
1049 return -ENODEV;
1050 }
1051
1052 ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
1053 if (ret) {
1054 dev_err(dev, "ti,num-rings read failure %d\n", ret);
1055 return ret;
1056 }
1057
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001058 ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
1059 if (IS_ERR(ringacc->tisci)) {
1060 ret = PTR_ERR(ringacc->tisci);
1061 if (ret != -EPROBE_DEFER)
1062 dev_err(dev, "ti,sci read fail %d\n", ret);
1063 ringacc->tisci = NULL;
1064 return ret;
1065 }
1066
1067 ret = of_property_read_u32(node, "ti,sci-dev-id",
1068 &ringacc->tisci_dev_id);
1069 if (ret) {
1070 dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
1071 return ret;
1072 }
1073
1074 pdev->id = ringacc->tisci_dev_id;
1075
1076 ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
1077 ringacc->tisci_dev_id,
1078 "ti,sci-rm-range-gp-rings");
1079 if (IS_ERR(ringacc->rm_gp_range)) {
1080 dev_err(dev, "Failed to allocate MSI interrupts\n");
1081 return PTR_ERR(ringacc->rm_gp_range);
1082 }
1083
1084 return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
1085 ringacc->rm_gp_range);
1086}
1087
/* SoC data for silicon revisions that need the DMA ring reset workaround */
static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
	.dma_ring_reset_quirk = 1,
};

/* Revision match table consulted via soc_device_match() at init time */
static const struct soc_device_attribute k3_ringacc_socinfo[] = {
	{ .family = "AM65X",
	  .revision = "SR1.0",
	  .data = &k3_ringacc_soc_data_sr1
	},
	{/* sentinel */}
};
1099
Grygorii Strashko40a2a7c2020-07-24 14:19:43 -07001100static int k3_ringacc_init(struct platform_device *pdev,
1101 struct k3_ringacc *ringacc)
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001102{
Grygorii Strashko95e7be02020-09-11 21:29:56 -07001103 const struct soc_device_attribute *soc;
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001104 void __iomem *base_fifo, *base_rt;
1105 struct device *dev = &pdev->dev;
1106 struct resource *res;
1107 int ret, i;
1108
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001109 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
1110 DOMAIN_BUS_TI_SCI_INTA_MSI);
1111 if (!dev->msi_domain) {
1112 dev_err(dev, "Failed to get MSI domain\n");
1113 return -EPROBE_DEFER;
1114 }
1115
1116 ret = k3_ringacc_probe_dt(ringacc);
1117 if (ret)
1118 return ret;
1119
Grygorii Strashko95e7be02020-09-11 21:29:56 -07001120 soc = soc_device_match(k3_ringacc_socinfo);
1121 if (soc && soc->data) {
1122 const struct k3_ringacc_soc_data *soc_data = soc->data;
1123
1124 ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
1125 }
1126
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001127 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
1128 base_rt = devm_ioremap_resource(dev, res);
1129 if (IS_ERR(base_rt))
1130 return PTR_ERR(base_rt);
1131
1132 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
1133 base_fifo = devm_ioremap_resource(dev, res);
1134 if (IS_ERR(base_fifo))
1135 return PTR_ERR(base_fifo);
1136
1137 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
1138 ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
1139 if (IS_ERR(ringacc->proxy_gcfg))
1140 return PTR_ERR(ringacc->proxy_gcfg);
1141
1142 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1143 "proxy_target");
1144 ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
1145 if (IS_ERR(ringacc->proxy_target_base))
1146 return PTR_ERR(ringacc->proxy_target_base);
1147
1148 ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
1149 K3_RINGACC_PROXY_CFG_THREADS_MASK;
1150
1151 ringacc->rings = devm_kzalloc(dev,
1152 sizeof(*ringacc->rings) *
1153 ringacc->num_rings,
1154 GFP_KERNEL);
1155 ringacc->rings_inuse = devm_kcalloc(dev,
1156 BITS_TO_LONGS(ringacc->num_rings),
1157 sizeof(unsigned long), GFP_KERNEL);
1158 ringacc->proxy_inuse = devm_kcalloc(dev,
1159 BITS_TO_LONGS(ringacc->num_proxies),
1160 sizeof(unsigned long), GFP_KERNEL);
1161
1162 if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
1163 return -ENOMEM;
1164
1165 for (i = 0; i < ringacc->num_rings; i++) {
1166 ringacc->rings[i].rt = base_rt +
1167 K3_RINGACC_RT_REGS_STEP * i;
1168 ringacc->rings[i].fifos = base_fifo +
1169 K3_RINGACC_FIFO_REGS_STEP * i;
1170 ringacc->rings[i].parent = ringacc;
1171 ringacc->rings[i].ring_id = i;
1172 ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
1173 }
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001174
1175 ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
1176
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001177 dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
1178 ringacc->num_rings,
1179 ringacc->rm_gp_range->desc[0].start,
1180 ringacc->rm_gp_range->desc[0].num,
1181 ringacc->tisci_dev_id);
1182 dev_info(dev, "dma-ring-reset-quirk: %s\n",
1183 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
1184 dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
1185 readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
Grygorii Strashko40a2a7c2020-07-24 14:19:43 -07001186
Grygorii Strashko3277e8a2020-01-15 10:07:27 -08001187 return 0;
1188}
1189
/* Per-compatible operations carried in the of_device_id data pointer */
struct ringacc_match_data {
	struct k3_ringacc_ops ops;
};

/* Ops for "ti,am654-navss-ringacc"; init is invoked from probe */
static struct ringacc_match_data k3_ringacc_data = {
	.ops = {
		.init = k3_ringacc_init,
	},
};
1199
/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
	{ .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
	{}, /* sentinel */
};
1205
Grygorii Strashko40a2a7c2020-07-24 14:19:43 -07001206static int k3_ringacc_probe(struct platform_device *pdev)
1207{
1208 const struct ringacc_match_data *match_data;
1209 const struct of_device_id *match;
1210 struct device *dev = &pdev->dev;
1211 struct k3_ringacc *ringacc;
1212 int ret;
1213
1214 match = of_match_node(k3_ringacc_of_match, dev->of_node);
1215 if (!match)
1216 return -ENODEV;
1217 match_data = match->data;
1218
1219 ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
1220 if (!ringacc)
1221 return -ENOMEM;
1222
1223 ringacc->dev = dev;
1224 mutex_init(&ringacc->req_lock);
1225 ringacc->ops = &match_data->ops;
1226
1227 ret = ringacc->ops->init(pdev, ringacc);
1228 if (ret)
1229 return ret;
1230
1231 dev_set_drvdata(dev, ringacc);
1232
1233 mutex_lock(&k3_ringacc_list_lock);
1234 list_add_tail(&ringacc->list, &k3_ringacc_list);
1235 mutex_unlock(&k3_ringacc_list_lock);
1236
1237 return 0;
1238}
1239
/* Built-in driver (no remove); manual bind/unbind via sysfs suppressed */
static struct platform_driver k3_ringacc_driver = {
	.probe = k3_ringacc_probe,
	.driver = {
		.name = "k3-ringacc",
		.of_match_table = k3_ringacc_of_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(k3_ringacc_driver);