// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define RX_BURST_LEVEL	16
#define TX_BUSY		1
#define TX_BURST_LEVEL	16

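/*
 * Filter callback for dma_request_channel(): accept only channels that
 * belong to the DMA device passed in via @param (a struct dw_dma_slave).
 */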
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

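/*
 * Pick the Rx/Tx burst lengths: use the DMA device capabilities if they
 * are reported, fall back to the 16-entry defaults otherwise, and never
 * exceed half of the SSI FIFO depth.
 */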
static void mid_spi_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = TX_BURST_LEVEL;

	dws->txburst = min(max_burst, def_burst);
}

static int mid_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave slave = {
		.src_id = 0,
		.dst_id = 0
	};
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller. Currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	slave.dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, &slave);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	slave.dst_id = 1;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, &slave);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	mid_spi_maxburst_init(dws);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

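/*
 * Generic channel request path: take the "rx" and "tx" DMA channels
 * described for this device by the platform firmware (e.g. ACPI or DT).
 */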
static int mid_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	mid_spi_maxburst_init(dws);

	return 0;
}

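/*
 * Tear down the DMA channels: stop any in-flight descriptors, release
 * both channels and disable the DMA requests in the controller.
 */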
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

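/*
 * IRQ handler used while a DMA transfer is in flight: any unmasked SPI
 * interrupt here means a FIFO overrun/underrun, so reset the chip and
 * fail the current message.
 */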
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

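/* Use DMA only for transfers that do not fit into the FIFO */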
static bool mid_spi_can_dma(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

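/* Map the SPI word size in bytes to a dmaengine bus width */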
static enum dma_slave_buswidth convert_dma_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

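/*
 * Wait for the DMA completion callbacks (or the error IRQ) with a
 * timeout derived from the transfer length and the actual bus speed,
 * doubled plus 200 ms of slack. For example, 4096 bytes at 10 MHz take
 * 4096 * 8 * 1000 / 10000000 = ~3 ms, giving a timeout of
 * 2 * 3 + 200 = 206 ms.
 */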
static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned long long ms;

	ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, xfer->effective_speed_hz);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

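/*
 * The DMA completion callback only means the descriptor has been
 * executed; data may still sit in the Tx FIFO. Poll the FIFO-empty
 * flag, sleeping for roughly the time the remaining entries take on
 * the wire, and give up after WAIT_RETRIES attempts.
 */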
static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the
 * callback for the Tx channel will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

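/*
 * Configure the Tx channel for memory-to-device transfers at the SPI
 * word width and prepare a descriptor over the transfer's scatterlist.
 * Returns NULL for Rx-only transfers.
 */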
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->n_bytes);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still doing the data
	 * fetching, but if it is, let's give it some reasonable time. The
	 * timeout calculation is based on the synchronous APB/SSI reference
	 * clock rate, the number of data entries left in the Rx FIFO, and
	 * the number of clock periods normally needed for a single APB
	 * read/write transaction without the PREADY signal utilized (which
	 * is true for the DW APB SSI controller). For example, with a
	 * 25 MHz reference clock a single FIFO entry accounts for
	 * 4 * 40 ns = 160 ns.
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the
 * callback for the Rx channel will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

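/*
 * Mirror of dw_spi_dma_prepare_tx() for the device-to-memory direction.
 * Returns NULL for Tx-only transfers.
 */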
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->n_bytes);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

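/*
 * Program the FIFO watermarks that pace the DMA requests, enable the
 * Rx/Tx DMA handshaking interfaces, and unmask the FIFO error
 * interrupts, which serve as the error path for a DMA'ed transfer.
 */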
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr = 0, dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
	dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - dws->txburst);

	if (xfer->tx_buf) {
		dma_ctrl |= SPI_DMA_TDMAE;
		imr |= SPI_INT_TXOI;
	}
	if (xfer->rx_buf) {
		dma_ctrl |= SPI_DMA_RDMAE;
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	}
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dma_transfer;

	return 0;
}

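/*
 * Submit the prepared descriptors, Rx first, then wait for completion
 * and drain both FIFOs before letting the SPI core finalize the
 * transfer.
 */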
static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;
	int ret;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * Rx must be started before Tx: data is clocked into the Rx FIFO
	 * as soon as Tx pushes it out, so the Rx channel has to be ready
	 * first.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	ret = dw_spi_dma_wait(dws, xfer);
	if (ret)
		return ret;

	if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

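/*
 * Abort path: terminate whichever channels are still marked busy and
 * disable the DMA requests in the controller.
 */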
static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

static const struct dw_spi_dma_ops mfld_dma_ops = {
	.dma_init = mid_spi_dma_init_mfld,
	.dma_exit = mid_spi_dma_exit,
	.dma_setup = mid_spi_dma_setup,
	.can_dma = mid_spi_can_dma,
	.dma_transfer = mid_spi_dma_transfer,
	.dma_stop = mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &mfld_dma_ops;
}

static const struct dw_spi_dma_ops generic_dma_ops = {
	.dma_init = mid_spi_dma_init_generic,
	.dma_exit = mid_spi_dma_exit,
	.dma_setup = mid_spi_dma_setup,
	.can_dma = mid_spi_can_dma,
	.dma_transfer = mid_spi_dma_transfer,
	.dma_stop = mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_generic(struct dw_spi *dws)
{
	dws->dma_ops = &generic_dma_ops;
}
#else /* CONFIG_SPI_DW_MID_DMA */
static inline void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws) {}
static inline void dw_spi_mid_setup_dma_generic(struct dw_spi *dws) {}
#endif

/* Some specific info for the SPI0 controller on Intel MID */

/* HW info for the MRST Clk Control Unit, one 32-bit register per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

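/*
 * Read the clock divider for this controller from the CCU and derive
 * the maximum SPI frequency: max_freq = 100 MHz / (CDIV + 1), e.g.
 * CDIV = 3 yields 25 MHz.
 */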
int dw_spi_mid_init_mfld(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_mid_setup_dma_mfld(dws);
	return 0;
}

| 522 | int dw_spi_mid_init_generic(struct dw_spi *dws) |
| 523 | { |
| 524 | /* Register hook to configure CTRLR0 */ |
| 525 | dws->update_cr0 = dw_spi_update_cr0; |
| 526 | |
| 527 | dw_spi_mid_setup_dma_generic(dws); |
| 528 | return 0; |
| 529 | } |