/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};
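
/*
 * A driver typically embeds struct virt_dma_desc at the start of its own
 * descriptor and recovers it with container_of(). A minimal sketch; the
 * foo_* names are hypothetical, not part of this API:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		dma_addr_t src, dst;
 *		size_t len;
 *	};
 *
 *	static struct foo_desc *to_foo_desc(struct virt_dma_desc *vd)
 *	{
 *		return container_of(vd, struct foo_desc, vd);
 *	}
 */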

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;

	struct virt_dma_desc *cyclic;
	struct virt_dma_desc *vd_terminated;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
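
/*
 * A driver embeds struct virt_dma_chan in its channel structure, sets
 * ->desc_free and calls vchan_init() from probe. A hedged sketch, reusing
 * the hypothetical foo_* names from above:
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		struct foo_desc *active;
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(to_foo_desc(vd));
 *	}
 *
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fd->ddev);
 */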

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
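
/*
 * vchan_tx_prep() is the usual tail of a driver's device_prep_*() callback:
 * allocate the driver descriptor, fill in the hardware-specific fields, then
 * hand the embedded virt_dma_desc over. An illustrative sketch only, with
 * the hypothetical foo_* names (GFP_NOWAIT because prep callbacks may run
 * in atomic context):
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *		return vchan_tx_prep(to_virt_chan(chan), &d->vd, flags);
 *	}
 */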

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
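
/*
 * A driver's device_issue_pending callback usually takes vc.lock around this
 * and kicks the hardware if it is idle. A sketch, assuming hypothetical
 * foo_busy()/foo_start() helpers that test and program the controller:
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		if (vchan_issue_pending(vc) && !foo_busy(vc))
 *			foo_start(vc);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *	}
 */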

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
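
/*
 * This is typically called from the driver's interrupt handler once the
 * hardware has finished a descriptor; the client callback itself then runs
 * later from vc->task. A hedged sketch (fc->active is the hypothetical
 * driver field introduced above):
 *
 *	spin_lock(&fc->vc.lock);
 *	if (fc->active) {
 *		vchan_cookie_complete(&fc->active->vd);
 *		fc->active = NULL;
 *		foo_start(&fc->vc);
 *	}
 *	spin_unlock(&fc->vc.lock);
 */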

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx))
		list_add(&vd->node, &vc->desc_allocated);
	else
		vc->desc_free(vd);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
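
/*
 * For cyclic transfers the descriptor stays active across periods, so the
 * interrupt handler calls this once per period instead of
 * vchan_cookie_complete(), e.g. (sketch; ->cyclic here is an assumed
 * driver-private flag, not part of this API):
 *
 *	if (fc->active && fc->active->cyclic)
 *		vchan_cyclic_callback(&fc->active->vd);
 */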

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 *
 * The descriptor is not freed here; it is parked in vc->vd_terminated and
 * released later by vchan_synchronize(), since its callback may still be
 * pending on the tasklet.
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	/* free up the descriptor stuck from a previous termination */
	if (vc->vd_terminated)
		vchan_vdesc_fini(vc->vd_terminated);

	vc->vd_terminated = vd;
	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}
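
/*
 * Intended for use from a driver's device_terminate_all callback. A minimal
 * sketch with the hypothetical foo_* names (foo_stop_hardware() is assumed
 * to halt the channel):
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_stop_hardware(fc);
 *		if (fc->active) {
 *			vchan_terminate_vdesc(&fc->active->vd);
 *			fc->active = NULL;
 *		}
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *		return 0;
 *	}
 */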

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
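
/*
 * The returned descriptor is left on desc_issued; a driver usually
 * list_del()s it once the hardware has accepted it, e.g. (sketch,
 * foo_program_hw() hypothetical):
 *
 *	vd = vchan_next_desc(&fc->vc);
 *	if (vd) {
 *		list_del(&vd->node);
 *		fc->active = to_foo_desc(vd);
 *		foo_program_hw(fc);
 *	}
 */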

/**
 * vchan_get_all_descriptors - obtain all allocated, submitted, issued and
 *	completed descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all descriptors from the internal lists and provides them on
 * @head for the caller to dispose of, typically via
 * vchan_dma_desc_free_list().
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
}

/**
 * vchan_free_chan_resources - free all descriptors on a channel
 * @vc: virtual channel to free descriptors on
 *
 * Clears the reuse flag on every outstanding descriptor so that all of
 * them, including those marked for reuse, are really freed.
 */
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
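
/*
 * Usually wired up, directly or via a thin wrapper, as the driver's
 * device_free_chan_resources callback, e.g. (sketch, assuming no extra
 * per-channel state to tear down):
 *
 *	static void foo_free_chan_resources(struct dma_chan *chan)
 *	{
 *		vchan_free_chan_resources(to_virt_chan(chan));
 *	}
 */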

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are
 * scheduled after the invocation of this function started.
 * Also frees the descriptor parked by vchan_terminate_vdesc(), preventing a
 * memory leak.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);
	if (vc->vd_terminated) {
		vchan_vdesc_fini(vc->vd_terminated);
		vc->vd_terminated = NULL;
	}
	spin_unlock_irqrestore(&vc->lock, flags);
}
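
/*
 * Typically used directly as the driver's device_synchronize callback:
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(to_virt_chan(chan));
 *	}
 */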

#endif