// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16

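/*
 * Filter callback used with dma_request_channel(): accept only channels
 * belonging to the DMA controller described by the dw_dma_slave data
 * passed in via @param.
 */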
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
        struct dw_dma_slave *s = param;

        if (s->dma_dev != chan->device->dev)
                return false;

        chan->private = s;
        return true;
}

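/*
 * Pick the Rx/Tx DMA burst lengths from the slave channel capabilities
 * (falling back to the 16-entry defaults), cap them at half of the SPI
 * FIFO depth and program the DMA request trigger levels accordingly.
 */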
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
        struct dma_slave_caps caps;
        u32 max_burst, def_burst;
        int ret;

        def_burst = dws->fifo_len / 2;

        ret = dma_get_slave_caps(dws->rxchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = RX_BURST_LEVEL;

        dws->rxburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

        ret = dma_get_slave_caps(dws->txchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = TX_BURST_LEVEL;

        /*
         * Having the Rx DMA channel serviced with higher priority than the
         * Tx DMA channel might not be enough to provide a well balanced
         * DMA-based SPI transfer interface. There might still be moments
         * when the Tx DMA channel is occasionally handled faster than the
         * Rx DMA channel. That in turn will eventually cause the SPI Rx
         * FIFO to overflow if the SPI bus speed is high enough to fill the
         * SPI Rx FIFO before it's cleared by the Rx DMA channel. In order
         * to fix the problem the Tx DMA activity is intentionally slowed
         * down by limiting the SPI Tx FIFO depth to a value twice the Tx
         * burst length.
         */
        dws->txburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

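/*
 * Request the Rx/Tx channels of the Intel Medfield DMA controller
 * (PCI device 0x0827) and bind them to the SPI controller.
 */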
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
        struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
        struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
        struct pci_dev *dma_dev;
        dma_cap_mask_t mask;

        /*
         * Get the PCI device of the DMA controller. Currently it can only
         * be the Medfield DMA controller.
         */
        dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
        if (!dma_dev)
                return -ENODEV;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* 1. Init rx channel */
        rx->dma_dev = &dma_dev->dev;
        dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
        if (!dws->rxchan)
                goto err_exit;

        /* 2. Init tx channel */
        tx->dma_dev = &dma_dev->dev;
        dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
        if (!dws->txchan)
                goto free_rxchan;

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        return 0;

free_rxchan:
        dma_release_channel(dws->rxchan);
        dws->rxchan = NULL;
err_exit:
        return -EBUSY;
}

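/*
 * Request the "rx" and "tx" DMA channels declared for the device (e.g. in
 * Device Tree or ACPI) and bind them to the SPI controller.
 */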
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
        dws->rxchan = dma_request_slave_channel(dev, "rx");
        if (!dws->rxchan)
                return -ENODEV;

        dws->txchan = dma_request_slave_channel(dev, "tx");
        if (!dws->txchan) {
                dma_release_channel(dws->rxchan);
                dws->rxchan = NULL;
                return -ENODEV;
        }

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        return 0;
}

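/* Terminate any in-flight DMA transfers and release both channels. */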
static void dw_spi_dma_exit(struct dw_spi *dws)
{
        if (dws->txchan) {
                dmaengine_terminate_sync(dws->txchan);
                dma_release_channel(dws->txchan);
        }

        if (dws->rxchan) {
                dmaengine_terminate_sync(dws->rxchan);
                dma_release_channel(dws->rxchan);
        }
}

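/*
 * SPI IRQ handler used while a DMA transfer is in flight: any unmasked
 * interrupt at this point indicates a FIFO overrun/underrun, so the
 * transfer is aborted with -EIO.
 */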
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
        u16 irq_status = dw_readl(dws, DW_SPI_ISR);

        if (!irq_status)
                return IRQ_NONE;

        dw_readl(dws, DW_SPI_ICR);
        spi_reset_chip(dws);

        dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
        dws->master->cur_msg->status = -EIO;
        complete(&dws->dma_completion);
        return IRQ_HANDLED;
}

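/* Use DMA only for transfers that don't fit entirely into the SPI FIFO. */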
static bool dw_spi_can_dma(struct spi_controller *master,
                           struct spi_device *spi, struct spi_transfer *xfer)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);

        return xfer->len > dws->fifo_len;
}

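/* Map the transfer word size in bytes to a dmaengine bus width. */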
static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
        if (n_bytes == 1)
                return DMA_SLAVE_BUSWIDTH_1_BYTE;
        else if (n_bytes == 2)
                return DMA_SLAVE_BUSWIDTH_2_BYTES;

        return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

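/*
 * Wait for the DMA completion with a timeout of twice the nominal transfer
 * time (derived from the transfer length and bus speed) plus 200 ms.
 */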
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
        unsigned long long ms;

        ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
        do_div(ms, speed);
        ms += ms + 200;

        if (ms > UINT_MAX)
                ms = UINT_MAX;

        ms = wait_for_completion_timeout(&dws->dma_completion,
                                         msecs_to_jiffies(ms));

        if (ms == 0) {
                dev_err(&dws->master->cur_msg->spi->dev,
                        "DMA transaction timed out\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
        return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

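/*
 * After the DMA completion wait for the Tx FIFO to drain, delaying for the
 * number of SCLK cycles the remaining FIFO entries still need on the bus.
 */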
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int retry = WAIT_RETRIES;
        struct spi_delay delay;
        u32 nents;

        nents = dw_readl(dws, DW_SPI_TXFLR);
        delay.unit = SPI_DELAY_UNIT_SCK;
        delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

        while (dw_spi_dma_tx_busy(dws) && retry--)
                spi_delay_exec(&delay, xfer);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Tx hung up\n");
                return -EIO;
        }

        return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the Tx channel will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(TX_BUSY, &dws->dma_chan_busy);
        if (test_bit(RX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
        struct dma_slave_config txconf;

        memset(&txconf, 0, sizeof(txconf));
        txconf.direction = DMA_MEM_TO_DEV;
        txconf.dst_addr = dws->dma_addr;
        txconf.dst_maxburst = dws->txburst;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        txconf.device_fc = false;

        return dmaengine_slave_config(dws->txchan, &txconf);
}

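/*
 * Prepare and submit a Tx descriptor for the given SG list. The transfer is
 * not started here; dma_async_issue_pending() does that later.
 */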
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *txdesc;
        dma_cookie_t cookie;
        int ret;

        txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
                                         DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc)
                return -ENOMEM;

        txdesc->callback = dw_spi_dma_tx_done;
        txdesc->callback_param = dws;

        cookie = dmaengine_submit(txdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->txchan);
                return ret;
        }

        set_bit(TX_BUSY, &dws->dma_chan_busy);

        return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
        return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
        int retry = WAIT_RETRIES;
        struct spi_delay delay;
        unsigned long ns, us;
        u32 nents;

        /*
         * It's unlikely that the DMA engine is still fetching data, but if
         * it is, let's give it some reasonable time. The timeout is
         * calculated from the synchronous APB/SSI reference clock rate and
         * the number of data entries left in the Rx FIFO, times the number
         * of clock periods normally needed for a single APB read/write
         * transaction without the PREADY signal utilized (which is true
         * for the DW APB SSI controller).
         */
        nents = dw_readl(dws, DW_SPI_RXFLR);
        ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
        if (ns <= NSEC_PER_USEC) {
                delay.unit = SPI_DELAY_UNIT_NSECS;
                delay.value = ns;
        } else {
                us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
                delay.unit = SPI_DELAY_UNIT_USECS;
                delay.value = clamp_val(us, 0, USHRT_MAX);
        }

        while (dw_spi_dma_rx_busy(dws) && retry--)
                spi_delay_exec(&delay, NULL);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Rx hung up\n");
                return -EIO;
        }

        return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the Rx channel will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(RX_BUSY, &dws->dma_chan_busy);
        if (test_bit(TX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
        struct dma_slave_config rxconf;

        memset(&rxconf, 0, sizeof(rxconf));
        rxconf.direction = DMA_DEV_TO_MEM;
        rxconf.src_addr = dws->dma_addr;
        rxconf.src_maxburst = dws->rxburst;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        rxconf.device_fc = false;

        return dmaengine_slave_config(dws->rxchan, &rxconf);
}

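/*
 * Prepare and submit an Rx descriptor for the given SG list. As with Tx, the
 * transfer is only started by a later dma_async_issue_pending() call.
 */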
static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *rxdesc;
        dma_cookie_t cookie;
        int ret;

        rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc)
                return -ENOMEM;

        rxdesc->callback = dw_spi_dma_rx_done;
        rxdesc->callback_param = dws;

        cookie = dmaengine_submit(rxdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->rxchan);
                return ret;
        }

        set_bit(RX_BUSY, &dws->dma_chan_busy);

        return 0;
}

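/*
 * Prepare the controller for a DMA-driven transfer: configure both channels,
 * enable the DMA handshaking interface and unmask the FIFO error interrupts.
 */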
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
        u16 imr, dma_ctrl;
        int ret;

        if (!xfer->tx_buf)
                return -EINVAL;

        /* Setup DMA channels */
        ret = dw_spi_dma_config_tx(dws);
        if (ret)
                return ret;

        if (xfer->rx_buf) {
                ret = dw_spi_dma_config_rx(dws);
                if (ret)
                        return ret;
        }

        /* Set the DMA handshaking interface */
        dma_ctrl = SPI_DMA_TDMAE;
        if (xfer->rx_buf)
                dma_ctrl |= SPI_DMA_RDMAE;
        dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

        /* Set the interrupt mask */
        imr = SPI_INT_TXOI;
        if (xfer->rx_buf)
                imr |= SPI_INT_RXUI | SPI_INT_RXOI;
        spi_umask_intr(dws, imr);

        reinit_completion(&dws->dma_completion);

        dws->transfer_handler = dw_spi_dma_transfer_handler;

        return 0;
}

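/*
 * Submit both DMA descriptors, kick the channels (Rx first) and wait for the
 * transfer to complete. The DMA handshaking interface is disabled on exit.
 */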
static int dw_spi_dma_transfer_all(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int ret;

        /* Submit the DMA Tx transfer */
        ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
        if (ret)
                goto err_clear_dmac;

        /* Submit the DMA Rx transfer if required */
        if (xfer->rx_buf) {
                ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
                                           xfer->rx_sg.nents);
                if (ret)
                        goto err_clear_dmac;

                /* Rx must be started before Tx so no incoming data is lost */
                dma_async_issue_pending(dws->rxchan);
        }

        dma_async_issue_pending(dws->txchan);

        ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
        dw_writel(dws, DW_SPI_DMACR, 0);

        return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
        int ret;

        ret = dw_spi_dma_transfer_all(dws, xfer);
        if (ret)
                return ret;

        if (dws->master->cur_msg->status == -EINPROGRESS) {
                ret = dw_spi_dma_wait_tx_done(dws, xfer);
                if (ret)
                        return ret;
        }

        if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
                ret = dw_spi_dma_wait_rx_done(dws);

        return ret;
}

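/* Terminate whichever DMA channels are still marked busy and clear their flags. */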
static void dw_spi_dma_stop(struct dw_spi *dws)
{
        if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->txchan);
                clear_bit(TX_BUSY, &dws->dma_chan_busy);
        }
        if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->rxchan);
                clear_bit(RX_BUSY, &dws->dma_chan_busy);
        }
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
        .dma_init = dw_spi_dma_init_mfld,
        .dma_exit = dw_spi_dma_exit,
        .dma_setup = dw_spi_dma_setup,
        .can_dma = dw_spi_can_dma,
        .dma_transfer = dw_spi_dma_transfer,
        .dma_stop = dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
        .dma_init = dw_spi_dma_init_generic,
        .dma_exit = dw_spi_dma_exit,
        .dma_setup = dw_spi_dma_setup,
        .can_dma = dw_spi_can_dma,
        .dma_transfer = dw_spi_dma_transfer,
        .dma_stop = dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);