// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "dmaengine.h"

#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight and DMA is paused then this is the time to
 * complete the in-flight burst and update the DMA status register.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 ahb_seq;
	u32 apb_seq;
	u32 wcount;
};

/*
 * tegra_dma_sg_req: DMA request details to configure the hardware. This
 * holds the details of one sub-transfer programmed into the DMA hardware.
 * A client's data transfer request can be broken into multiple
 * sub-transfers, depending on the requester details and hardware support.
 * Each sub-transfer is added to the list of transfers and points to the
 * Tegra DMA descriptor that manages the overall transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	unsigned int req_len;
	bool configured;
	bool last_sg;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
	unsigned int words_xferred;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	unsigned int bytes_requested;
	unsigned int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	unsigned int cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[12];
	bool config_init;
	unsigned int id;
	void __iomem *chan_addr;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for bottom half of ISR handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;

	struct wait_queue_head wq;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	struct reset_control *rst;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32 global_pause_count;

	/* Last member of the structure */
	struct tegra_dma_channel channels[];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *
txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* Get DMA desc from free list, if not there then allocate it. */
static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if a desc is still waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;

	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
			       struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *
tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
					  node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;

	return 0;
}
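
/*
 * Illustrative sketch (not part of the driver): a hypothetical dmaengine
 * client would reach tegra_dma_slave_config() above through the generic
 * API, roughly as follows, assuming "chan" came from dma_request_chan()
 * and "fifo_addr" is the peripheral's FIFO address:
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	cfg.dst_addr = fifo_addr;
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.dst_maxburst = 8;
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * Until this call succeeds, config_init stays false and the prep
 * callbacks below refuse to build descriptors.
 */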

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
				   bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
			    bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
			    struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
					 struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no IEC status then this makes sure that the last
	 * burst has not completed yet. The last burst may be in flight and
	 * can complete, but because DMA is paused it will neither generate
	 * an interrupt nor reload the new configuration.
	 * If there is already IEC status then the interrupt handler needs
	 * to load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;
	nsg_req->words_xferred = 0;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	sg_req->words_xferred = 0;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq, *hnsgreq;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
					   node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline unsigned int
get_current_xferred_count(struct tegra_dma_channel *tdc,
			  struct tegra_dma_sg_req *sg_req,
			  unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
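
/*
 * Worked example (illustrative): for a 64-byte sub-transfer, req_len is
 * 64 and the word count field was programmed as (64 - 4) & 0xFFFC = 60.
 * Right after the transfer starts, the STATUS count still reads 60, so
 * 64 - 60 - 4 = 0 bytes are reported; once the counter reaches 0, the
 * result is 64 - 0 - 4 = 60 bytes, i.e. the hardware counter always lags
 * the real transfer size by one 4-byte word (see also
 * tegra_dma_sg_bytes_xferred() below).
 */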

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
					   bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq;

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		pm_runtime_put(tdc->tdma->dev);
		dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);

	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
				 bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate)
		return;

	if (list_empty(&tdc->pending_sg_req)) {
		pm_runtime_put(tdc->tdma->dev);
		return;
	}

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
					    bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* if we dma for long enough the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}
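
/*
 * Worked example (illustrative) for the modulo arithmetic above: with a
 * 4096-byte cyclic buffer split into 1024-byte periods, the naive sum
 * after the fifth completed period would be 5120; taking it modulo
 * bytes_requested keeps bytes_transferred at 1024, the offset of the
 * just-completed period within the current pass over the buffer.
 */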

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned int cb_count;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	u32 status;

	spin_lock(&tdc->lock);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		wake_up_all(&tdc->wq);
		spin_unlock(&tdc->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&tdc->lock);
	dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
		 status);

	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		err = pm_runtime_get_sync(tdc->tdma->dev);
		if (err < 0) {
			dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
			goto end;
		}

		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time to configure the DMA
			 * for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	unsigned long flags;
	u32 status, wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

	pm_runtime_put(tdc->tdma->dev);
	wake_up_all(&tdc->wq);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	return 0;
}

static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
{
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&tdc->lock, flags);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
}

static void tegra_dma_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	/*
	 * The CPU that handles the interrupt could be busy in an
	 * uninterruptible state; in that case a sibling CPU should wait
	 * until the interrupt has been handled.
	 */
	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));

	tasklet_kill(&tdc->tasklet);
}

static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
					       struct tegra_dma_sg_req *sg_req)
{
	u32 status, wcount = 0;

	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
		return 0;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = status;

	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
		return sg_req->req_len;

	wcount = get_current_xferred_count(tdc, sg_req, wcount);

	if (!wcount) {
		/*
		 * If wcount wasn't ever polled for this SG before, then
		 * simply assume that the transfer hasn't started yet.
		 *
		 * Otherwise it's the end of the transfer.
		 *
		 * The alternative would be to poll the status register
		 * until the EOC bit is set or wcount goes UP. That's so
		 * because the EOC bit is set only after the last burst
		 * completes and the counter is less than the actual
		 * transfer size by 4 bytes. The counter value wraps around
		 * in a cyclic mode before EOC is set(!), so we can't easily
		 * distinguish the start of a transfer from its end.
		 */
		if (sg_req->words_xferred)
			wcount = sg_req->req_len - 4;

	} else if (wcount < sg_req->words_xferred) {
		/*
		 * This case will never happen for a non-cyclic transfer.
		 *
		 * For a cyclic transfer, although it is possible for the
		 * next transfer to have already started (resetting the word
		 * count), this case should still not happen because we should
		 * have detected that the EOC bit is set and hence the transfer
		 * was completed.
		 */
		WARN_ON_ONCE(1);

		wcount = sg_req->req_len - 4;
	} else {
		sg_req->words_xferred = wcount;
	}

	return wcount;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;
	unsigned int bytes = 0;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   ((dma_desc->bytes_transferred + bytes) %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return ret;
}
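
/*
 * Worked example (illustrative) for the residue computed above: in a
 * cyclic transfer with bytes_requested = 4096 and a running total of
 * bytes_transferred + bytes = 5120, the residue is
 * 4096 - (5120 % 4096) = 3072, meaning 1024 bytes of the current pass
 * over the buffer are already consumed. The modulo keeps the result
 * meaningful after the cyclic byte counter wraps.
 */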

static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
					 enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}
Dmitry Osipenko39642932020-02-09 19:33:45 +0300961static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
962 u32 burst_size,
963 enum dma_slave_buswidth slave_bw,
964 u32 len)
Laxman Dewanganec8a1582012-06-06 10:55:27 +0530965{
Dmitry Osipenko39642932020-02-09 19:33:45 +0300966 unsigned int burst_byte, burst_ahb_width;
Laxman Dewanganec8a1582012-06-06 10:55:27 +0530967
968 /*
969 * burst_size from client is in terms of the bus_width.
970 * convert them into AHB memory width which is 4 byte.
971 */
972 burst_byte = burst_size * slave_bw;
973 burst_ahb_width = burst_byte / 4;
974
975 /* If burst size is 0 then calculate the burst size based on length */
976 if (!burst_ahb_width) {
977 if (len & 0xF)
978 return TEGRA_APBDMA_AHBSEQ_BURST_1;
979 else if ((len >> 4) & 0x1)
980 return TEGRA_APBDMA_AHBSEQ_BURST_4;
981 else
982 return TEGRA_APBDMA_AHBSEQ_BURST_8;
983 }
984 if (burst_ahb_width < 4)
985 return TEGRA_APBDMA_AHBSEQ_BURST_1;
986 else if (burst_ahb_width < 8)
987 return TEGRA_APBDMA_AHBSEQ_BURST_4;
988 else
989 return TEGRA_APBDMA_AHBSEQ_BURST_8;
990}
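
/*
 * Worked example (illustrative): a client maxburst of 8 words on a
 * 4-byte wide APB bus gives burst_byte = 32 and burst_ahb_width = 8,
 * selecting TEGRA_APBDMA_AHBSEQ_BURST_8. With maxburst left at 0 and a
 * 24-byte transfer, len & 0xF is non-zero, so the fallback picks
 * TEGRA_APBDMA_AHBSEQ_BURST_1.
 */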

static int get_transfer_param(struct tegra_dma_channel *tdc,
			      enum dma_transfer_direction direction,
			      u32 *apb_addr,
			      u32 *apb_seq,
			      u32 *csr,
			      unsigned int *burst_size,
			      enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		break;
	}

	return -EINVAL;
}

static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
				  struct tegra_dma_channel_regs *ch_regs,
				  u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}
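
/*
 * Worked example (illustrative): len = 4096 yields a word count field of
 * (4096 - 4) & 0xFFFC = 4092 (0xFFC). Chips with a separate WCOUNT
 * register take it there; older chips take it OR-ed into the CSR, whose
 * bits 15:2 hold the count (TEGRA_APBDMA_CSR_WCOUNT_MASK).
 */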

static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc,
			struct scatterlist *sgl,
			unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags,
			void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	struct list_head req_list;
	struct scatterlist *sg;
	unsigned int burst_size;
	unsigned int i;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
		    len > tdc->tdma->chip_data->max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
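
/*
 * Illustrative sketch (not part of the driver): a hypothetical client
 * would drive the prep/submit path above through the generic dmaengine
 * API, e.g.:
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_done_cb;	// "my_done_cb" is made up
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * DMA_PREP_INTERRUPT is mandatory here: the prep callbacks reject
 * descriptors requested without it.
 */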

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
			  size_t buf_len,
			  size_t period_len,
			  enum dma_transfer_direction direction,
			  unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	dma_addr_t mem = buf_addr;
	unsigned int burst_size;
	size_t len, remain_len;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * More requests can be queued as long as the DMA has not been
	 * started. The driver will loop over all requests.
	 * Once the DMA is started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
	    len > tdc->tdma->chip_data->max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dma_cookie_init(&tdc->dma_chan);

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	tasklet_kill(&tdc->tasklet);

	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
					    node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}
1347
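/*
 * OF translation callback: clients reference a channel with a single
 * cell holding the APB request-select ID. An illustrative (assumed)
 * device-tree consumer would look like:
 *
 *	uart@70006000 {
 *		dmas = <&apbdma 8>, <&apbdma 8>;
 *		dma-names = "rx", "tx";
 *	};
 */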
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct tegra_dma_channel *tdc;
	struct dma_chan *chan;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

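/*
 * Per-chip parameters. Later chips add more channels and channel-pause
 * support; Tegra148 additionally uses a wider per-channel register
 * stride and a separate word-count register.
 */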
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels = 16,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x40,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = true,
};

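/*
 * Bring the controller to a known state: cycle the reset with the clock
 * running, then enable global DMA operation and set the channel
 * interrupt mask. Used at probe time and again on system resume, when
 * register state may have been lost.
 */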
static int tegra_dma_init_hw(struct tegra_dma *tdma)
{
	int err;

	err = reset_control_assert(tdma->rst);
	if (err) {
		dev_err(tdma->dev, "failed to assert reset: %d\n", err);
		return err;
	}

	err = clk_enable(tdma->dma_clk);
	if (err) {
		dev_err(tdma->dev, "failed to enable clk: %d\n", err);
		return err;
	}

	/* hold the reset briefly with the clock running, then release it */
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);

	clk_disable(tdma->dma_clk);

	return 0;
}

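/*
 * Probe: map the register window, acquire the clock and reset,
 * initialize the hardware, then register one dmaengine channel (with
 * its own IRQ and tasklet) per chip-data channel and hook up the OF
 * translator.
 */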
static int tegra_dma_probe(struct platform_device *pdev)
{
	const struct tegra_dma_chip_data *cdata;
	struct tegra_dma *tdma;
	unsigned int i;
	size_t size;
	int ret;

	cdata = of_device_get_match_data(&pdev->dev);
	size = struct_size(tdma, channels, cdata->nr_channels);

	tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	ret = clk_prepare(tdma->dma_clk);
	if (ret)
		return ret;

	ret = tegra_dma_init_hw(tdma);
	if (ret)
		goto err_clk_unprepare;

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		int irq;

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_pm_disable;
		}

		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
				       tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_pm_disable;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
			      &tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
			     (unsigned long)tdc);
		spin_lock_init(&tdc->lock);
		init_waitqueue_head(&tdc->wq);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_pm_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
		 cdata->nr_channels);

	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);

err_pm_disable:
	pm_runtime_disable(&pdev->dev);

err_clk_unprepare:
	clk_unprepare(tdma->dma_clk);

	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
	pm_runtime_disable(&pdev->dev);
	clk_unprepare(tdma->dma_clk);

	return 0;
}

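/*
 * Runtime PM only gates the clock: the clock is prepared once at probe
 * time, so these irq-safe callbacks merely enable and disable it.
 */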
static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	clk_disable(tdma->dma_clk);

	return 0;
}

static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	return clk_enable(tdma->dma_clk);
}

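/*
 * System sleep: refuse to suspend while any channel is busy, then let
 * runtime PM turn the clock off. Resume re-runs the hardware init
 * because register state may not be preserved across suspend.
 */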
static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;
	bool busy;

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tasklet_kill(&tdc->tasklet);

		spin_lock_irqsave(&tdc->lock, flags);
		busy = tdc->busy;
		spin_unlock_irqrestore(&tdc->lock, flags);

		if (busy) {
			dev_err(tdma->dev, "channel %u busy\n", i);
			return -EBUSY;
		}
	}

	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int err;

	err = tegra_dma_init_hw(tdma);
	if (err)
		return err;

	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
};

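/*
 * Compatible strings map each SoC generation to its chip data;
 * of_device_get_match_data() in probe picks up the matched entry.
 */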
static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");