// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

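/**
 * vchan_tx_submit - submit a descriptor to a virtual channel
 * @tx: the transfer to submit
 *
 * Assigns a cookie to @tx and moves its descriptor onto the channel's
 * desc_submitted list; the transfer is only started later, once the
 * client calls dma_async_issue_pending().
 *
 * Returns the assigned cookie.
 */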
dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way to free one is to clear the DMA_CTRL_REUSE flag and submit the
 * transfer one last time.
 *
 * Returns 0 upon success.
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);

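/**
 * vchan_find_desc - look up an issued descriptor by cookie
 * @vc: virtual channel
 * @cookie: cookie to search for
 *
 * Walks the desc_issued list and returns the descriptor whose cookie
 * matches @cookie, or NULL if none is found. The list is protected by
 * vc->lock, so callers are expected to hold it.
 */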
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	}
	spin_unlock_irq(&vc->lock);

	/* Run the cyclic callback, if any, without holding the lock. */
	if (vd)
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		/*
		 * Invoke the callback before vchan_vdesc_fini(): fini may
		 * free vd, after which vd->tx_result must not be touched.
		 */
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}

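/**
 * vchan_dma_desc_free_list - free or recycle a list of descriptors
 * @vc: virtual channel
 * @head: list of descriptors to dispose of
 *
 * Descriptors marked with DMA_CTRL_REUSE are moved back onto the channel's
 * desc_allocated list; all others are unlinked and released through the
 * driver's desc_free() callback.
 */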
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		if (dmaengine_desc_test_reuse(&vd->tx)) {
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);

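/**
 * vchan_init - initialise a virtual DMA channel
 * @vc: virtual channel to initialise
 * @dmadev: DMA device the channel belongs to
 *
 * Initialises the cookie, lock, descriptor lists and completion tasklet,
 * and registers the channel with @dmadev.
 */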
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
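
/*
 * Illustrative usage sketch (the foo_* names are invented, not part of
 * this API): a driver embeds struct virt_dma_chan in its own channel
 * type, points desc_free at a routine that releases its descriptor type,
 * and calls vchan_init() at probe time:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		// hardware-specific fields follow
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, dma_dev);
 */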

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");