// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16

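/*
 * Channel filter for dma_request_channel(): accept only channels provided
 * by the DMA device passed in via @param and stash the slave data in
 * chan->private so the DW DMA driver can pick up the request line numbers.
 */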
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

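/*
 * Initialize the Rx/Tx burst lengths: half of the SPI FIFO depth by default,
 * capped by the maximum burst the DMA channel reports via
 * dma_get_slave_caps() (or a 16-word fallback if the capability is unknown).
 */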
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	dws->txburst = min(max_burst, def_burst);
}

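/*
 * Channel request path for Intel Medfield: the DMA controller is a fixed
 * PCI function, and the Rx/Tx channels are picked by handshake interface
 * numbers via the filter callback above.
 */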
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller. Currently it can only
	 * be the DMA controller of Medfield.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

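/*
 * Generic channel request path: the "rx" and "tx" channels are taken from
 * the firmware (DT/ACPI) description of the SPI controller device.
 */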
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	dw_spi_dma_maxburst_init(dws);

	return 0;
}

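/*
 * Release the DMA resources: terminate whatever is still in flight, hand
 * the channels back and disable the controller DMA handshaking interface.
 */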
static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

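/*
 * SPI interrupt handler used while a DMA transfer is in flight. Only the
 * FIFO overrun/underrun interrupts are unmasked for DMA, so any pending
 * status means the transfer failed: reset the chip, mark the message with
 * -EIO and wake up the waiter.
 */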
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

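/* Use DMA only for transfers that don't fit entirely into the FIFO */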
static bool dw_spi_can_dma(struct spi_controller *master,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

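/* Map the SPI word size in bytes onto the DMA slave bus-width constant */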
static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

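/*
 * Wait for the DMA completion with a timeout of twice the theoretical
 * transfer time at the effective bus speed plus a 200 ms slack.
 */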
static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned long long ms;

	ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, xfer->effective_speed_hz);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

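/*
 * The Tx DMA completion only means the data has been pushed into the Tx
 * FIFO, not that it has been shifted out on the wire. Poll the FIFO-empty
 * flag for a few delays scaled by the number of words left before giving
 * up and reporting the controller as hung.
 */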
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Tx
 * channel callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

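/*
 * Configure the Tx channel for a memory-to-device transfer of the Tx
 * scatter list and install the completion callback. Returns NULL for
 * Rx-only transfers.
 */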
static struct dma_async_tx_descriptor *
dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if
	 * it is, let's give it some reasonable time. The timeout calculation
	 * is based on the synchronous APB/SSI reference clock rate, the
	 * number of data entries left in the Rx FIFO, and the number of
	 * clock periods normally needed for a single APB read/write
	 * transaction without the PREADY signal utilized (which is true for
	 * the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts, and the Rx
 * channel callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

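/*
 * Configure the Rx channel for a device-to-memory transfer of the Rx
 * scatter list and install the completion callback. Returns NULL for
 * Tx-only transfers.
 */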
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

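/*
 * Program the DMA FIFO watermarks, enable the Tx/Rx DMA handshaking for
 * the directions the transfer actually uses, unmask the matching FIFO
 * error interrupts and route the SPI IRQ to the DMA transfer handler.
 */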
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr = 0, dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
	dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - dws->txburst);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	if (xfer->tx_buf)
		imr |= SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

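/*
 * Submit the prepared Rx/Tx descriptors, wait for the DMA completion and
 * then make sure the Tx FIFO has been drained and the Rx FIFO consumed
 * before returning.
 */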
static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;
	int ret;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * Rx must be started before Tx: SPI is full-duplex, so receive data
	 * arrives as soon as the transmit side is running and the Rx DMA
	 * has to be ready to drain the Rx FIFO.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	ret = dw_spi_dma_wait(dws, xfer);
	if (ret)
		return ret;

	if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

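/*
 * Terminate whichever channels are still marked busy and disable the DMA
 * handshaking interface; used when a transfer has to be stopped early.
 */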
static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);