// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 */

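/*
 * Typical client usage, shown here only as an illustrative sketch. The calls
 * are the generic dmaengine slave API; the device, channel name, FIFO
 * address, buffer and callback below are hypothetical placeholders and are
 * not part of this driver.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	desc->callback = rx_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
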
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		(1 << 31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight while the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and the DMA
 * status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip-specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by the DMA controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
	unsigned long	wcount;
};

/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This contains the details of one transfer to configure the DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per requester details and hw support.
 * Each sub-transfer is added to the list of transfers and points to the
 * Tegra DMA descriptor which manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	unsigned int			req_len;
	bool				configured;
	bool				last_sg;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
	unsigned int			words_xferred;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	unsigned int			bytes_requested;
	unsigned int			bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[12];
	bool			config_init;
	int			id;
	int			irq;
	void __iomem		*chan_addr;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;

	/* Channel-slave specific configuration */
	unsigned int		slave_id;
	struct dma_slave_config	dma_sconfig;
	struct tegra_dma_channel_regs	channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	struct reset_control		*rst;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32				global_pause_count;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a desc that is still waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
			       struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					  typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;
	return 0;
}

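/*
 * Global pause stops the whole controller by clearing the GENERAL enable
 * bit. A reference count allows nested pause/resume requests from several
 * channels on chips without per-channel pause support.
 */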
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
				   bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
			    bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	} else {
		tegra_dma_global_resume(tdc);
	}
}

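/* Disable the channel and its EOC interrupt, clearing any stale status. */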
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
			    struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
					 struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no IEC status, then the last burst has not yet
	 * completed. The last burst may be in flight and complete, but
	 * because the DMA is paused it will neither generate an interrupt
	 * nor reload the new configuration.
	 * If the IEC status is already set, the interrupt handler needs to
	 * load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing, as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;
	nsg_req->words_xferred = 0;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
				  typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	sg_req->words_xferred = 0;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					   typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

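/*
 * Derive the number of bytes transferred so far for a request from the
 * hardware word count. Until EOC is set the value lags the actual transfer
 * size by one word (4 bytes); see tegra_dma_sg_bytes_xferred() below.
 */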
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight, abort the transfer, as looping
	 * of the transfer cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
				 bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
					    bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* If we DMA for long enough, the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If not the last req, put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

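/*
 * Bottom half: invoke the client completion callbacks queued on cb_desc,
 * dropping the channel lock around each callback invocation.
 */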
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

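/*
 * Interrupt handler: acknowledge EOC, run the mode-specific handler and
 * defer callback invocation to the tasklet.
 */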
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		 "Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time before configuring the DMA
			 * for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

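/*
 * Stop the channel, credit the partially transferred bytes of the in-flight
 * request, then flush all pending requests and queued callbacks.
 */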
static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return 0;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
	return 0;
}

static void tegra_dma_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	tasklet_kill(&tdc->tasklet);
}

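/*
 * Work out how many bytes of the head (in-flight) sg request have been
 * transferred, coping with the hardware word-counter quirks described in
 * the comments below.
 */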
static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
					       struct tegra_dma_sg_req *sg_req)
{
	unsigned long status, wcount = 0;

	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
		return 0;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = status;

	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
		return sg_req->req_len;

	wcount = get_current_xferred_count(tdc, sg_req, wcount);

	if (!wcount) {
		/*
		 * If wcount wasn't ever polled for this SG before, then
		 * simply assume that transfer hasn't started yet.
		 *
		 * Otherwise it's the end of the transfer.
		 *
		 * The alternative would be to poll the status register
		 * until EOC bit is set or wcount goes UP. That's so
		 * because EOC bit is getting set only after the last
		 * burst's completion and counter is less than the actual
		 * transfer size by 4 bytes. The counter value wraps around
		 * in a cyclic mode before EOC is set(!), so we can't easily
		 * distinguish start of transfer from its end.
		 */
		if (sg_req->words_xferred)
			wcount = sg_req->req_len - 4;

	} else if (wcount < sg_req->words_xferred) {
		/*
		 * This case will never happen for a non-cyclic transfer.
		 *
		 * For a cyclic transfer, although it is possible for the
		 * next transfer to have already started (resetting the word
		 * count), this case should still not happen because we should
		 * have detected that the EOC bit is set and hence the transfer
		 * was completed.
		 */
		WARN_ON_ONCE(1);

		wcount = sg_req->req_len - 4;
	} else {
		sg_req->words_xferred = wcount;
	}

	return wcount;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;
	unsigned int bytes = 0;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   ((dma_desc->bytes_transferred + bytes) %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
				enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus_width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}

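/*
 * Fill in the APB address, APB sequence (bus width), CSR direction bit and
 * burst size for the given transfer direction from the slave configuration.
 */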
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}

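/*
 * Encode the transfer length: chips with a separate WCOUNT register take it
 * there, older ones pack it into the WCOUNT field of the CSR register.
 */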
static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
	struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}

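/*
 * Build a one-shot scatter-gather transfer: each sg entry becomes one
 * hardware sub-transfer (sg_req) attached to a single descriptor.
 */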
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
		    (len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

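/*
 * Build a cyclic (continuous) transfer: the buffer is split into
 * period-sized sg requests that are recycled on the pending list until the
 * channel is terminated.
 */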
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We can accept more requests as long as the DMA has not been
	 * started. The driver will loop over all requests.
	 * Once the DMA is started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
	    (len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into period-sized chunks */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;

	ret = pm_runtime_get_sync(tdma->dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	tasklet_kill(&tdc->tasklet);

	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					    typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	pm_runtime_put(tdma->dev);

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}

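/*
 * Device tree translation: the single cell in the DMA specifier is the APB
 * requester (slave) ID that the allocated channel will use.
 */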
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct tegra_dma_channel *tdc;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};

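/*
 * Probe: map registers, acquire the clock and reset, reset the controller,
 * then register every channel with the dmaengine core.
 */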
Bill Pemberton463a1f82012-11-19 13:22:55 -05001385static int tegra_dma_probe(struct platform_device *pdev)
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301386{
Thierry Reding7b0e00d2016-06-14 16:18:46 +02001387 struct resource *res;
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301388 struct tegra_dma *tdma;
1389 int ret;
1390 int i;
Laxman Dewangan333f16e2016-03-01 18:54:40 +05301391 const struct tegra_dma_chip_data *cdata;
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301392
Laxman Dewangan333f16e2016-03-01 18:54:40 +05301393 cdata = of_device_get_match_data(&pdev->dev);
1394 if (!cdata) {
1395 dev_err(&pdev->dev, "Error: No device match data found\n");
Stephen Warrendc7badb2013-03-11 16:30:26 -06001396 return -ENODEV;
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301397 }
1398
Gustavo A. R. Silvad3d70372019-01-04 15:16:12 -06001399 tdma = devm_kzalloc(&pdev->dev,
1400 struct_size(tdma, channels, cdata->nr_channels),
1401 GFP_KERNEL);
Peter Griffinaef94fe2016-06-07 18:38:41 +01001402 if (!tdma)
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301403 return -ENOMEM;
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301404
1405 tdma->dev = &pdev->dev;
1406 tdma->chip_data = cdata;
1407 platform_set_drvdata(pdev, tdma);
1408
Dmitry Osipenkoc55c745e2020-02-09 19:33:43 +03001409 tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
Thierry Reding73312052013-01-21 11:09:00 +01001410 if (IS_ERR(tdma->base_addr))
1411 return PTR_ERR(tdma->base_addr);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301412
1413 tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
1414 if (IS_ERR(tdma->dma_clk)) {
1415 dev_err(&pdev->dev, "Error: Missing controller clock\n");
1416 return PTR_ERR(tdma->dma_clk);
1417 }
1418
Stephen Warren9aa433d2013-11-06 16:35:34 -07001419 tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
1420 if (IS_ERR(tdma->rst)) {
1421 dev_err(&pdev->dev, "Error: Missing reset\n");
1422 return PTR_ERR(tdma->rst);
1423 }
1424
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301425 spin_lock_init(&tdma->global_lock);
1426
1427 pm_runtime_enable(&pdev->dev);
Jon Hunteredd3bdb2015-11-13 16:39:38 +00001428 if (!pm_runtime_enabled(&pdev->dev))
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301429 ret = tegra_dma_runtime_resume(&pdev->dev);
Jon Hunteredd3bdb2015-11-13 16:39:38 +00001430 else
1431 ret = pm_runtime_get_sync(&pdev->dev);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301432
Laxman Dewanganffc49302012-07-20 13:31:08 +05301433 if (ret < 0) {
Jon Hunteredd3bdb2015-11-13 16:39:38 +00001434 pm_runtime_disable(&pdev->dev);
1435 return ret;
Laxman Dewanganffc49302012-07-20 13:31:08 +05301436 }
1437
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301438 /* Reset DMA controller */
Stephen Warren9aa433d2013-11-06 16:35:34 -07001439 reset_control_assert(tdma->rst);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301440 udelay(2);
Stephen Warren9aa433d2013-11-06 16:35:34 -07001441 reset_control_deassert(tdma->rst);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301442
1443 /* Enable global DMA registers */
1444 tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
1445 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1446 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1447
Jon Hunteredd3bdb2015-11-13 16:39:38 +00001448 pm_runtime_put(&pdev->dev);
Laxman Dewanganffc49302012-07-20 13:31:08 +05301449
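	/*
	 * Set up each channel: point it at its slice of the register window,
	 * request its interrupt, and add it to the dmaengine channel list
	 * along with its tasklet, lock and descriptor lists.
	 */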
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301450 INIT_LIST_HEAD(&tdma->dma_dev.channels);
1451 for (i = 0; i < cdata->nr_channels; i++) {
1452 struct tegra_dma_channel *tdc = &tdma->channels[i];
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301453
Jon Hunter13a33282015-08-06 14:32:31 +01001454 tdc->chan_addr = tdma->base_addr +
1455 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
1456 (i * cdata->channel_reg_size);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301457
1458 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1459 if (!res) {
1460 ret = -EINVAL;
1461 dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
1462 goto err_irq;
1463 }
1464 tdc->irq = res->start;
Laxman Dewangand0fc9052012-10-03 22:48:07 +05301465 snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
Jon Hunter05e866b2015-11-13 16:39:43 +00001466 ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301467 if (ret) {
1468 dev_err(&pdev->dev,
1469 "request_irq failed with err %d channel %d\n",
Dmitry Osipenkoac7ae752013-05-11 20:30:52 +04001470 ret, i);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301471 goto err_irq;
1472 }
1473
1474 tdc->dma_chan.device = &tdma->dma_dev;
1475 dma_cookie_init(&tdc->dma_chan);
1476 list_add_tail(&tdc->dma_chan.device_node,
1477 &tdma->dma_dev.channels);
1478 tdc->tdma = tdma;
1479 tdc->id = i;
Shardar Shariff Md00ef4492016-04-23 15:06:00 +05301480 tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301481
1482 tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
1483 (unsigned long)tdc);
1484 spin_lock_init(&tdc->lock);
1485
1486 INIT_LIST_HEAD(&tdc->pending_sg_req);
1487 INIT_LIST_HEAD(&tdc->free_sg_req);
1488 INIT_LIST_HEAD(&tdc->free_dma_desc);
1489 INIT_LIST_HEAD(&tdc->cb_desc);
1490 }
1491
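	/*
	 * The controller does slave and cyclic transfers only, and its
	 * channels are private (not handed out for generic async_tx use).
	 */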
1492 dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
1493 dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
Laxman Dewangan46fb3f82012-06-22 17:12:43 +05301494 dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
1495
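	/*
	 * Describe the dmaengine device: 8/16/32/64-bit slave bus widths in
	 * both directions, burst-granularity residue reporting, and the
	 * channel allocation, prep, control and status callbacks above.
	 */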
Jon Hunter23a1ec32015-08-06 14:32:33 +01001496 tdma->global_pause_count = 0;
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301497 tdma->dma_dev.dev = &pdev->dev;
1498 tdma->dma_dev.device_alloc_chan_resources =
1499 tegra_dma_alloc_chan_resources;
1500 tdma->dma_dev.device_free_chan_resources =
1501 tegra_dma_free_chan_resources;
1502 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1503 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
Paul Walmsley891653a2015-01-06 06:44:56 +00001504 tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1505 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1506 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1507 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1508 tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1509 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1510 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1511 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1512 tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
Dmitry Osipenko156a5992019-07-05 18:05:19 +03001513 tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
Maxime Ripard662f1ac2014-11-17 14:42:37 +01001514 tdma->dma_dev.device_config = tegra_dma_slave_config;
1515 tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
Dmitry Osipenkodda5e352020-02-09 19:33:40 +03001516 tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301517 tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1518 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1519
1520 ret = dma_async_device_register(&tdma->dma_dev);
1521 if (ret < 0) {
1522 dev_err(&pdev->dev,
1523 "Tegra20 APB DMA driver registration failed %d\n", ret);
1524 goto err_irq;
1525 }
1526
Stephen Warren996556c2013-11-11 13:09:35 -07001527 ret = of_dma_controller_register(pdev->dev.of_node,
1528 tegra_dma_of_xlate, tdma);
1529 if (ret < 0) {
1530 dev_err(&pdev->dev,
1531 "Tegra20 APB DMA OF registration failed %d\n", ret);
1532 goto err_unregister_dma_dev;
1533 }
1534
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301535	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
1536 cdata->nr_channels);
1537 return 0;
1538
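/*
 * Unwind in reverse order: drop the dmaengine registration, free the IRQs of
 * the channels already set up (i walks back from the one that failed), then
 * disable runtime PM and gate the clock if it was left running.
 */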
Stephen Warren996556c2013-11-11 13:09:35 -07001539err_unregister_dma_dev:
1540 dma_async_device_unregister(&tdma->dma_dev);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301541err_irq:
1542 while (--i >= 0) {
1543 struct tegra_dma_channel *tdc = &tdma->channels[i];
Jon Hunter05e866b2015-11-13 16:39:43 +00001544
1545 free_irq(tdc->irq, tdc);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301546 }
1547
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301548 pm_runtime_disable(&pdev->dev);
1549 if (!pm_runtime_status_suspended(&pdev->dev))
1550 tegra_dma_runtime_suspend(&pdev->dev);
1551 return ret;
1552}
1553
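/*
 * Remove: unregister the dmaengine device, free the per-channel IRQs and
 * undo the runtime PM / clock setup done at probe time.
 */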
Greg Kroah-Hartman4bf27b82012-12-21 15:09:59 -08001554static int tegra_dma_remove(struct platform_device *pdev)
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301555{
1556 struct tegra_dma *tdma = platform_get_drvdata(pdev);
1557 int i;
1558 struct tegra_dma_channel *tdc;
1559
1560 dma_async_device_unregister(&tdma->dma_dev);
1561
1562 for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
1563 tdc = &tdma->channels[i];
Jon Hunter05e866b2015-11-13 16:39:43 +00001564 free_irq(tdc->irq, tdc);
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301565 }
1566
1567 pm_runtime_disable(&pdev->dev);
1568 if (!pm_runtime_status_suspended(&pdev->dev))
1569 tegra_dma_runtime_suspend(&pdev->dev);
1570
1571 return 0;
1572}
1573
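/*
 * Runtime suspend: save the global enable register and the per-channel
 * registers of channels that have been configured, then gate the clock.
 */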
1574static int tegra_dma_runtime_suspend(struct device *dev)
1575{
Jon Hunter286a6442015-11-13 16:39:39 +00001576 struct tegra_dma *tdma = dev_get_drvdata(dev);
Laxman Dewangan3065c192013-04-24 15:24:27 +05301577 int i;
Laxman Dewangan3065c192013-04-24 15:24:27 +05301578
1579 tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
1580 for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1581 struct tegra_dma_channel *tdc = &tdma->channels[i];
1582 struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
1583
Jon Hunter4aad5be2015-11-13 16:39:41 +00001584 /* Only save the state of DMA channels that are in use */
1585 if (!tdc->config_init)
1586 continue;
1587
Laxman Dewangan3065c192013-04-24 15:24:27 +05301588 ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
1589 ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
1590 ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
1591 ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
1592 ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
Jon Hunter68ae7a92015-11-13 16:39:40 +00001593 if (tdma->chip_data->support_separate_wcount_reg)
1594 ch_reg->wcount = tdc_read(tdc,
1595 TEGRA_APBDMA_CHAN_WCOUNT);
Laxman Dewangan3065c192013-04-24 15:24:27 +05301596 }
1597
Jon Hunter65a5c3d2017-06-06 13:49:29 +01001598 clk_disable_unprepare(tdma->dma_clk);
1599
Laxman Dewangan3065c192013-04-24 15:24:27 +05301600 return 0;
1601}
1602
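/*
 * Runtime resume: ungate the clock and restore the global and per-channel
 * register state saved by tegra_dma_runtime_suspend().
 */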
Jon Hunter65a5c3d2017-06-06 13:49:29 +01001603static int tegra_dma_runtime_resume(struct device *dev)
Laxman Dewangan3065c192013-04-24 15:24:27 +05301604{
1605 struct tegra_dma *tdma = dev_get_drvdata(dev);
Jon Hunter65a5c3d2017-06-06 13:49:29 +01001606 int i, ret;
Laxman Dewangan3065c192013-04-24 15:24:27 +05301607
Jon Hunter65a5c3d2017-06-06 13:49:29 +01001608 ret = clk_prepare_enable(tdma->dma_clk);
1609 if (ret < 0) {
1610		dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
Laxman Dewangan3065c192013-04-24 15:24:27 +05301611 return ret;
Jon Hunter65a5c3d2017-06-06 13:49:29 +01001612 }
Laxman Dewangan3065c192013-04-24 15:24:27 +05301613
1614 tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
1615 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1616 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1617
1618 for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1619 struct tegra_dma_channel *tdc = &tdma->channels[i];
1620 struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
1621
Jon Hunter4aad5be2015-11-13 16:39:41 +00001622 /* Only restore the state of DMA channels that are in use */
1623 if (!tdc->config_init)
1624 continue;
1625
Jon Hunter68ae7a92015-11-13 16:39:40 +00001626 if (tdma->chip_data->support_separate_wcount_reg)
1627 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
1628 ch_reg->wcount);
Laxman Dewangan3065c192013-04-24 15:24:27 +05301629 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
1630 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
1631 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
1632 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
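		/* Keep the channel disabled; it is re-enabled when a new transfer starts. */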
1633 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
1634 (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
1635 }
1636
Laxman Dewangan3065c192013-04-24 15:24:27 +05301637 return 0;
1638}
Laxman Dewangan3065c192013-04-24 15:24:27 +05301639
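/*
 * Runtime PM does the real clock gating; system sleep reuses it through
 * pm_runtime_force_suspend()/pm_runtime_force_resume().
 */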
Greg Kroah-Hartman4bf27b82012-12-21 15:09:59 -08001640static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
Jon Hunteredd3bdb2015-11-13 16:39:38 +00001641 SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
1642 NULL)
Jon Hunter65a5c3d2017-06-06 13:49:29 +01001643 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1644 pm_runtime_force_resume)
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301645};
1646
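/*
 * OF match table; more specific (newer) compatibles are listed first.
 *
 * For illustration, a device tree would describe the controller roughly as
 * below (a minimal sketch, not taken from this file: the unit address,
 * interrupt, clock and reset specifiers are placeholders, and #dma-cells
 * assumes the single-cell slave-id binding handled by tegra_dma_of_xlate()):
 *
 *	apbdma: dma@6000a000 {
 *		compatible = "nvidia,tegra20-apbdma";
 *		reg = <0x6000a000 0x1400>;
 *		interrupts = <...>;
 *		clocks = <&tegra_car ...>;
 *		resets = <&tegra_car ...>;
 *		reset-names = "dma";
 *		#dma-cells = <1>;
 *	};
 */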
Laxman Dewangan242637b2016-03-04 15:55:11 +05301647static const struct of_device_id tegra_dma_of_match[] = {
1648 {
1649 .compatible = "nvidia,tegra148-apbdma",
1650 .data = &tegra148_dma_chip_data,
1651 }, {
1652 .compatible = "nvidia,tegra114-apbdma",
1653 .data = &tegra114_dma_chip_data,
1654 }, {
1655 .compatible = "nvidia,tegra30-apbdma",
1656 .data = &tegra30_dma_chip_data,
1657 }, {
1658 .compatible = "nvidia,tegra20-apbdma",
1659 .data = &tegra20_dma_chip_data,
1660 }, {
1661 },
1662};
1663MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
1664
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301665static struct platform_driver tegra_dmac_driver = {
1666 .driver = {
Laxman Dewangancd9092c2012-07-02 13:52:08 +05301667 .name = "tegra-apbdma",
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301668 .pm = &tegra_dma_dev_pm_ops,
Stephen Warrendc7badb2013-03-11 16:30:26 -06001669 .of_match_table = tegra_dma_of_match,
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301670 },
1671 .probe = tegra_dma_probe,
Bill Pembertona7d6e3e2012-11-19 13:20:04 -05001672 .remove = tegra_dma_remove,
Laxman Dewanganec8a1582012-06-06 10:55:27 +05301673};
1674
1675module_platform_driver(tegra_dmac_driver);
1676
1677MODULE_ALIAS("platform:tegra20-apbdma");
1678MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
1679MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1680MODULE_LICENSE("GPL v2");