// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16

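/*
 * dma_request_channel() filter: accept only channels belonging to the DMA
 * device described by the dw_dma_slave parameter and bind that slave data
 * to the channel.
 */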
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

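/*
 * Derive the Rx/Tx burst lengths from the DMA channel capabilities, capped
 * at half the SPI FIFO depth, and program the DMA trigger levels accordingly.
 */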
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	/*
	 * Having the Rx DMA channel serviced with higher priority than the Tx
	 * DMA channel might not be enough to provide a well balanced DMA-based
	 * SPI transfer interface. There may still be moments when the Tx DMA
	 * channel is occasionally handled faster than the Rx DMA channel.
	 * That in turn will eventually cause an SPI Rx FIFO overflow if the
	 * SPI bus speed is high enough to fill the Rx FIFO before it's drained
	 * by the Rx DMA channel. To fix the problem, the Tx DMA activity is
	 * intentionally slowed down by limiting the effective Tx FIFO depth to
	 * twice the Tx burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

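/*
 * Request the Rx/Tx channels of the Intel Medfield DMA controller (matched
 * by its PCI ID) and attach them to the SPI controller.
 */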
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device of the DMA controller. Currently it can only be
	 * the Intel Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

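/*
 * Request the "rx"/"tx" DMA channels described by the platform (e.g. DT or
 * ACPI) and attach them to the SPI controller.
 */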
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;
}

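/*
 * Terminate any active DMA transfers, release both channels and disable the
 * controller DMA interface.
 */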
static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

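/*
 * Interrupt handler used while a DMA transfer is in flight: any unmasked
 * interrupt here indicates an Rx/Tx FIFO error, so reset the chip and fail
 * the current message.
 */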
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

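/* Use DMA only for transfers that don't fit entirely into the FIFO. */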
static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

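/* Map the SPI word size in bytes to a dmaengine bus width. */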
static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

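/*
 * Wait for the DMA completion with a timeout derived from the transfer
 * length and the effective bus speed (doubled, plus a 200 ms margin).
 */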
static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned long long ms;

	ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, xfer->effective_speed_hz);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

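/*
 * The Tx DMA completion only means the data has been pushed into the Tx
 * FIFO; poll the FIFO-empty flag, delaying by roughly the time needed to
 * shift out the entries still queued, until the FIFO is drained.
 */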
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback of
 * the Tx channel clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

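/* Configure the Tx DMA channel for memory-to-device transfers towards the SPI Tx FIFO. */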
static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

static struct dma_async_tx_descriptor *
dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc;

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

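/* Poll until the Rx DMA channel has fully drained the SPI Rx FIFO. */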
static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if it
	 * is, give it some reasonable time. The delay is based on the
	 * synchronous APB/SSI reference clock rate, the number of data entries
	 * left in the Rx FIFO, and the number of clock periods normally needed
	 * for a single APB read/write transaction without the PREADY signal
	 * utilized (which is the case for the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback of
 * the Rx channel clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

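/* Configure the Rx DMA channel for device-to-memory transfers from the SPI Rx FIFO. */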
static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

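/*
 * Prepare the controller for a DMA transfer: configure the DMA channels,
 * enable the DMA handshaking interface and unmask the FIFO error interrupts
 * serviced by dw_spi_dma_transfer_handler().
 */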
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

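/*
 * Submit the prepared descriptors (Rx first, then Tx), wait for the DMA
 * completion and then for the Tx and Rx FIFOs to drain.
 */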
static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;
	int ret;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);
	if (!txdesc)
		return -EINVAL;

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/* Rx must be started before Tx since SPI receives data while it transmits */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	set_bit(TX_BUSY, &dws->dma_chan_busy);
	dmaengine_submit(txdesc);
	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer);
	if (ret)
		return ret;

	if (dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

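/*
 * Abort any in-flight DMA transfers and disable the controller DMA interface.
 */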
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);