// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

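	/* Default to a burst of half the FIFO depth, capped below by the channel capabilities */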
	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
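	/*
	 * DMARDLR is a zero-based threshold: the Rx DMA request is raised
	 * once at least rxburst entries reside in the Rx FIFO.
	 */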
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	/*
	 * Having a Rx DMA channel serviced with higher priority than a Tx
	 * DMA channel might not be enough to provide a well balanced
	 * DMA-based SPI transfer interface. There might still be moments
	 * when the Tx DMA channel is occasionally handled faster than the
	 * Rx DMA channel. That in turn will eventually cause an SPI Rx FIFO
	 * overflow if the SPI bus speed is high enough to fill the SPI Rx
	 * FIFO before it's cleared by the Rx DMA channel. In order to fix
	 * the problem the Tx DMA activity is intentionally slowed down by
	 * limiting the SPI Tx FIFO depth to a value twice as big as the Tx
	 * burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx = {0}, rx = {0};

	dma_get_slave_caps(dws->txchan, &tx);
	dma_get_slave_caps(dws->rxchan, &rx);

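	/*
	 * Use the smallest SG entries burst capability advertised by the DMA
	 * channels. Zero means there is no limit and the SG lists can be
	 * handed to the DMA engine entirely.
	 */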
	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;
}

static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller. Currently it can only
	 * be the DMA controller of Intel Medfield.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	dw_spi_dma_sg_burst_init(dws);

	return 0;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

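	/* Clear all the pending interrupts and reset the controller */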
	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

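	/* Use DMA only for transfers that don't fit entirely into the FIFO */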
	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

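	/*
	 * Estimate the transfer time from its length and speed, then double
	 * it and add a 200 ms margin.
	 */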
	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

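	/*
	 * Delay for as many SCK periods as are needed to shift out the data
	 * entries left in the Tx FIFO.
	 */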
	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback for
 * the Tx channel will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still doing the data
	 * fetching, but if it is, let's give it some reasonable time. The
	 * timeout calculation is based on the synchronous APB/SSI reference
	 * clock rate, the number of data entries left in the Rx FIFO, and
	 * the number of clock periods normally needed for a single APB
	 * read/write transaction without the PREADY signal utilized (which
	 * is true for the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback for
 * the Rx channel will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(RX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* rx must be started before tx due to spi instinct */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * If at least one of the requested DMA channels doesn't support the hardware
 * accelerated SG list entries traversal, the DMA driver will most likely work
 * around that by performing the IRQ-based SG list entries resubmission. That
 * will cause a problem if the DMA Tx channel is recharged and re-executed
 * before the Rx DMA channel. Due to the non-deterministic IRQ-handler
 * execution latency the DMA Tx channel will start pushing data to the SPI bus
 * before the Rx DMA channel is even reinitialized with the next inbound SG
 * list entry. By doing so the DMA Tx channel will implicitly start filling
 * the DW APB SSI Rx FIFO up, which, while the DMA Rx channel is being
 * recharged and re-executed, will eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one-by-one. It shall keep the DW APB SSI Tx and Rx FIFOs
 * synchronized and prevent the Rx FIFO overflow. Since in general the tx_sg
 * and rx_sg lists may have different numbers of entries of different lengths
 * (though the total length should match) let's virtually split the SG lists
 * into a set of DMA transfers, each of which has a length equal to the
 * minimum of the ordered SG-entry lengths. An ASCII sketch of the implemented
 * algorithm follows:
 *                                 xfer->len
 *                              |___________|
 *               tx_sg list:    |___|____|__|
 *               rx_sg list:    |_|____|____|
 *               DMA transfers: |_|_|__|_|__|
 *
 * Note in order to have this workaround solving the denoted problem the DMA
 * engine driver should properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter with the maximum data block
 * size the DMA engine supports.
 */

static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0, len = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx due to SPI instinct */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to be
		 * finished since the SPI controller is kept enabled during
		 * the procedure this loop implements and there is no risk
		 * of losing data left in the Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

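		/* Advance within the current Tx and Rx SG entries */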
		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute the normal DMA-based transfer (which submits the Rx and
	 * Tx SG lists directly to the DMA engine at once) if either the
	 * full hardware accelerated SG list traversal is supported by both
	 * channels, or a Tx-only SPI transfer is requested, or the DMA
	 * engine is capable of handling both SG lists on a hardware
	 * accelerated basis.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);