// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
| 7 | #include <linux/device.h> |
| 8 | #include <linux/dmaengine.h> |
| 9 | #include <linux/module.h> |
| 10 | #include <linux/spinlock.h> |
| 11 | |
| 12 | #include "virt-dma.h" |
| 13 | |
| 14 | static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) |
| 15 | { |
| 16 | return container_of(tx, struct virt_dma_desc, tx); |
| 17 | } |
| 18 | |
| 19 | dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) |
| 20 | { |
| 21 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); |
| 22 | struct virt_dma_desc *vd = to_virt_desc(tx); |
| 23 | unsigned long flags; |
| 24 | dma_cookie_t cookie; |
| 25 | |
| 26 | spin_lock_irqsave(&vc->lock, flags); |
| 27 | cookie = dma_cookie_assign(tx); |
| 28 | |
Robert Jarzmik | 13bb26a | 2015-10-13 21:54:28 +0200 | [diff] [blame] | 29 | list_move_tail(&vd->node, &vc->desc_submitted); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 30 | spin_unlock_irqrestore(&vc->lock, flags); |
| 31 | |
| 32 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", |
| 33 | vc, vd, cookie); |
| 34 | |
| 35 | return cookie; |
| 36 | } |
| 37 | EXPORT_SYMBOL_GPL(vchan_tx_submit); |
| 38 | |
Robert Jarzmik | 13bb26a | 2015-10-13 21:54:28 +0200 | [diff] [blame] | 39 | /** |
| 40 | * vchan_tx_desc_free - free a reusable descriptor |
| 41 | * @tx: the transfer |
| 42 | * |
| 43 | * This function frees a previously allocated reusable descriptor. The only |
| 44 | * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the |
| 45 | * transfer. |
| 46 | * |
| 47 | * Returns 0 upon success |
| 48 | */ |
| 49 | int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) |
| 50 | { |
| 51 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); |
| 52 | struct virt_dma_desc *vd = to_virt_desc(tx); |
| 53 | unsigned long flags; |
| 54 | |
| 55 | spin_lock_irqsave(&vc->lock, flags); |
| 56 | list_del(&vd->node); |
| 57 | spin_unlock_irqrestore(&vc->lock, flags); |
| 58 | |
| 59 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", |
| 60 | vc, vd, vd->tx.cookie); |
| 61 | vc->desc_free(vd); |
| 62 | return 0; |
| 63 | } |
| 64 | EXPORT_SYMBOL_GPL(vchan_tx_desc_free); |
| 65 | |
Russell King | fe04587 | 2012-05-10 23:39:27 +0100 | [diff] [blame] | 66 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, |
| 67 | dma_cookie_t cookie) |
| 68 | { |
| 69 | struct virt_dma_desc *vd; |
| 70 | |
| 71 | list_for_each_entry(vd, &vc->desc_issued, node) |
| 72 | if (vd->tx.cookie == cookie) |
| 73 | return vd; |
| 74 | |
| 75 | return NULL; |
| 76 | } |
| 77 | EXPORT_SYMBOL_GPL(vchan_find_desc); |
| 78 | |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 79 | /* |
| 80 | * This tasklet handles the completion of a DMA descriptor by |
| 81 | * calling its callback and freeing it. |
| 82 | */ |
Allen Pais | 3f7a660 | 2020-08-31 16:05:37 +0530 | [diff] [blame] | 83 | static void vchan_complete(struct tasklet_struct *t) |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 84 | { |
Allen Pais | 3f7a660 | 2020-08-31 16:05:37 +0530 | [diff] [blame] | 85 | struct virt_dma_chan *vc = from_tasklet(vc, t, task); |
Andy Shevchenko | fdb980f | 2017-04-25 22:36:38 +0300 | [diff] [blame] | 86 | struct virt_dma_desc *vd, *_vd; |
Dave Jiang | 4f03ac6 | 2016-07-20 13:13:33 -0700 | [diff] [blame] | 87 | struct dmaengine_desc_callback cb; |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 88 | LIST_HEAD(head); |
| 89 | |
| 90 | spin_lock_irq(&vc->lock); |
| 91 | list_splice_tail_init(&vc->desc_completed, &head); |
Russell King | 571fa74 | 2012-05-14 15:17:20 +0100 | [diff] [blame] | 92 | vd = vc->cyclic; |
| 93 | if (vd) { |
| 94 | vc->cyclic = NULL; |
Dave Jiang | 4f03ac6 | 2016-07-20 13:13:33 -0700 | [diff] [blame] | 95 | dmaengine_desc_get_callback(&vd->tx, &cb); |
| 96 | } else { |
| 97 | memset(&cb, 0, sizeof(cb)); |
Russell King | 571fa74 | 2012-05-14 15:17:20 +0100 | [diff] [blame] | 98 | } |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 99 | spin_unlock_irq(&vc->lock); |
| 100 | |
Alexandru Ardelean | 09d5b70 | 2019-06-06 13:45:47 +0300 | [diff] [blame] | 101 | dmaengine_desc_callback_invoke(&cb, &vd->tx_result); |
Russell King | 571fa74 | 2012-05-14 15:17:20 +0100 | [diff] [blame] | 102 | |
Andy Shevchenko | fdb980f | 2017-04-25 22:36:38 +0300 | [diff] [blame] | 103 | list_for_each_entry_safe(vd, _vd, &head, node) { |
Dave Jiang | 4f03ac6 | 2016-07-20 13:13:33 -0700 | [diff] [blame] | 104 | dmaengine_desc_get_callback(&vd->tx, &cb); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 105 | |
| 106 | list_del(&vd->node); |
Alexandru Ardelean | 09d5b70 | 2019-06-06 13:45:47 +0300 | [diff] [blame] | 107 | dmaengine_desc_callback_invoke(&cb, &vd->tx_result); |
Peter Ujfalusi | 24461d9 | 2019-12-20 15:11:00 +0200 | [diff] [blame] | 108 | vchan_vdesc_fini(vd); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 109 | } |
| 110 | } |
| 111 | |
| 112 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) |
| 113 | { |
Andy Shevchenko | fdb980f | 2017-04-25 22:36:38 +0300 | [diff] [blame] | 114 | struct virt_dma_desc *vd, *_vd; |
| 115 | |
| 116 | list_for_each_entry_safe(vd, _vd, head, node) { |
Sascha Hauer | b167f94 | 2019-12-16 11:53:25 +0100 | [diff] [blame] | 117 | list_del(&vd->node); |
| 118 | vchan_vdesc_fini(vd); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 119 | } |
| 120 | } |
| 121 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); |
| 122 | |
| 123 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) |
| 124 | { |
| 125 | dma_cookie_init(&vc->chan); |
| 126 | |
| 127 | spin_lock_init(&vc->lock); |
Robert Jarzmik | 13bb26a | 2015-10-13 21:54:28 +0200 | [diff] [blame] | 128 | INIT_LIST_HEAD(&vc->desc_allocated); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 129 | INIT_LIST_HEAD(&vc->desc_submitted); |
| 130 | INIT_LIST_HEAD(&vc->desc_issued); |
| 131 | INIT_LIST_HEAD(&vc->desc_completed); |
Sascha Hauer | f8821011 | 2019-12-16 11:53:23 +0100 | [diff] [blame] | 132 | INIT_LIST_HEAD(&vc->desc_terminated); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 133 | |
Allen Pais | 3f7a660 | 2020-08-31 16:05:37 +0530 | [diff] [blame] | 134 | tasklet_setup(&vc->task, vchan_complete); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 135 | |
| 136 | vc->chan.device = dmadev; |
| 137 | list_add_tail(&vc->chan.device_node, &dmadev->channels); |
| 138 | } |
| 139 | EXPORT_SYMBOL_GPL(vchan_init); |
| 140 | |
| 141 | MODULE_AUTHOR("Russell King"); |
| 142 | MODULE_LICENSE("GPL"); |