// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16

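/*
 * dma_request_channel() filter callback: accept only channels backed by the
 * DMA device specified in the dw_dma_slave parameter, and attach the slave
 * data to the matched channel.
 */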
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

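/*
 * Initialize the Rx and Tx burst lengths from the DMA channel capabilities
 * (falling back to 16 entries if a channel doesn't report a maximum),
 * capped at half the SPI FIFO depth, and program the corresponding DMA
 * request trigger levels into the controller.
 */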
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	/*
	 * Servicing the Rx DMA channel at a higher priority than the Tx DMA
	 * channel might not be enough to keep a DMA-based SPI transfer well
	 * balanced. There may still be moments when the Tx DMA channel is
	 * occasionally handled faster than the Rx DMA channel. That in turn
	 * will eventually cause an SPI Rx FIFO overflow if the SPI bus speed
	 * is high enough to fill the Rx FIFO before the Rx DMA channel
	 * drains it. To fix the problem, the Tx DMA activity is intentionally
	 * slowed down by capping the Tx burst length at half the Tx FIFO
	 * depth.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

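/*
 * Derive a common max SG burst from both channels' capabilities: take the
 * smaller reported max_sg_burst, or whichever one is set, or zero if
 * neither channel reports a limit (i.e. unlimited SG entries per burst).
 */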
static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx = {0}, rx = {0};

	dma_get_slave_caps(dws->txchan, &tx);
	dma_get_slave_caps(dws->rxchan, &rx);

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;
}

static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

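/*
 * Generic channel setup: look the "rx" and "tx" DMA channels up via the
 * firmware (DT/ACPI) description of the device instead of a hardwired PCI
 * DMA controller.
 */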
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;
}

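/*
 * Terminate any in-flight DMA and release both channels. The channels may
 * legitimately be NULL here if the init callback failed part-way.
 */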
static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

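/*
 * SPI IRQ handler used while a DMA transfer is in progress: the only
 * unmasked interrupts are FIFO error conditions, so report the status and
 * wake the waiter up so the transfer can be aborted.
 */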
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}

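/* DMA is only worth the setup overhead when the data doesn't fit the FIFO */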
static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

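/*
 * Wait for the DMA completion. The timeout is twice the nominal transfer
 * time at the given bus speed plus a 200 ms margin, clamped to UINT_MAX.
 */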
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

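/*
 * The DMA completion only means the DMA engine has pushed all data to the
 * controller; entries may still sit in the Tx FIFO. Poll until the FIFO
 * drains, delaying each retry by the bus time needed to shift the
 * remaining entries out.
 */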
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Tx
 * channel callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

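/*
 * Prepare and submit the Tx descriptor for the given SG list. On a
 * submission error the channel is synchronously terminated so no stale
 * descriptor is left behind.
 */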
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if
	 * it is, give it some reasonable time. The timeout is based on the
	 * synchronous APB/SSI reference clock rate and the number of data
	 * entries left in the Rx FIFO, times the number of clock periods
	 * normally needed for a single APB read/write transaction without
	 * the PREADY signal utilized (which is the case for the DW APB SSI
	 * controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Rx
 * channel callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

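/* Rx counterpart of dw_spi_dma_submit_tx() */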
static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(RX_BUSY, &dws->dma_chan_busy);

	return 0;
}

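/*
 * Per-transfer setup: configure both channels for the transfer width and
 * burst lengths, enable the controller's DMA handshaking interface, and
 * unmask the FIFO error interrupts so a broken transfer can be caught.
 */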
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* Rx must be started before Tx since SPI is full-duplex */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * If at least one of the requested DMA channels doesn't support hardware
 * accelerated SG list traversal, the DMA driver will most likely work
 * around that by resubmitting the SG list entries from the IRQ handler.
 * That causes a problem if the Tx DMA channel gets recharged and
 * re-executed before the Rx DMA channel. Due to the non-deterministic
 * IRQ-handler execution latency, the Tx DMA channel may start pushing data
 * to the SPI bus before the Rx DMA channel is reinitialized with the next
 * inbound SG list entry. The Tx DMA channel then implicitly starts filling
 * the DW APB SSI Rx FIFO, which will eventually overflow while the Rx DMA
 * channel is still being recharged and re-executed.
 *
 * To solve the problem we have to feed the DMA engine with SG list entries
 * one by one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized and
 * prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have different numbers of entries of different lengths (though
 * the total lengths must match), let's virtually split the SG lists into a
 * set of DMA transfers, each with a length equal to the minimum of the
 * current pair of SG-entry lengths. An ASCII sketch of the implemented
 * algorithm:
 *                 xfer->len
 *                |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note that for this workaround to solve the denoted problem, the DMA
 * engine driver should properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter to the maximum data block
 * size the DMA engine supports.
 */

static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0, len = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx since SPI is full-duplex */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to finish,
		 * since the SPI controller is kept enabled throughout the
		 * loop and there is no risk of losing data left in the
		 * Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute the normal DMA-based transfer (which submits the Rx and
	 * Tx SG lists directly to the DMA engine at once) if fully hardware
	 * accelerated SG list traversal is supported by both channels, or
	 * if a Tx-only SPI transfer is requested, or if the DMA engine can
	 * handle both SG lists within its hardware accelerated limit.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

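/*
 * Abort path: synchronously terminate whichever channels are still marked
 * busy and clear their busy bits.
 */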
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);