// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define WAIT_RETRIES	5
#define RX_BUSY		0
#define TX_BUSY		1

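/*
 * dmaengine channel filter: accept only channels that belong to the DMA
 * controller device passed in via the dw_dma_slave parameter, and stash the
 * slave data in chan->private so the DesignWare DMA driver can pick up the
 * request line numbers.
 */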
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

static int mid_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave slave = {
		.src_id = 0,
		.dst_id = 0
	};
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller. Currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	slave.dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, &slave);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	slave.dst_id = 1;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, &slave);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

static int mid_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	return 0;
}

static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

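/*
 * Interrupt handler used while a DMA transfer is in flight (hooked up as
 * dws->transfer_handler in mid_spi_dma_setup()). Only the FIFO
 * overrun/underrun interrupts are unmasked during DMA, so any interrupt
 * here means the FIFOs were not serviced in time: clear the interrupts,
 * reset the controller and fail the current message.
 */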
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	complete(&dws->dma_completion);
	return IRQ_HANDLED;
}

static bool mid_spi_can_dma(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth convert_dma_width(u8 n_bytes)
{
	if (n_bytes == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (n_bytes == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

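/*
 * Wait for both DMA channels to signal completion (or for the error
 * interrupt to fire). The timeout is derived from the transfer length:
 * the nominal transfer time at the effective bus rate, doubled, plus a
 * 200 ms margin.
 */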
static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned long long ms;

	ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, xfer->effective_speed_hz);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->master->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
}

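/*
 * The Tx DMA completion callback only means the data has been pushed into
 * the Tx FIFO, not that it has gone out on the wire. Poll the FIFO until
 * it drains, delaying for roughly the number of SCLK cycles needed to
 * shift out the entries still queued, and give up after WAIT_RETRIES
 * attempts.
 */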
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Tx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Tx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->n_bytes);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still fetching data, but if
	 * it is, give it some reasonable time. The timeout calculation is
	 * based on the synchronous APB/SSI reference clock rate, the number
	 * of data entries left in the Rx FIFO, and the number of clock
	 * periods normally needed for a single APB read/write transaction
	 * without the PREADY signal utilized (which is true for the DW APB
	 * SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Rx hung up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Rx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	complete(&dws->dma_completion);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->n_bytes);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

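/*
 * Program the controller for a DMA-driven transfer: the Rx/Tx DMA request
 * thresholds (0xf and 0x10 FIFO entries) line up with the 16-word bursts
 * configured on the DMA channels, DMA requests are enabled only for the
 * directions the transfer actually uses, and only the FIFO
 * overrun/underrun interrupts are unmasked so dma_transfer() can catch
 * FIFO mishandling.
 */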
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr = 0, dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf) {
		dma_ctrl |= SPI_DMA_TDMAE;
		imr |= SPI_INT_TXOI;
	}
	if (xfer->rx_buf) {
		dma_ctrl |= SPI_DMA_RDMAE;
		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
	}
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dma_transfer;

	return 0;
}

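/*
 * Submit the prepared descriptors and wait for the transfer to finish.
 * The Rx descriptor is issued before the Tx one so the receive channel is
 * already armed when the first word goes out on the bus. Once the DMA
 * completion fires, the FIFOs are polled to make sure the controller
 * itself has finished shifting data.
 */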
static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;
	int ret;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/* Rx must be started before Tx due to the SPI nature of the transfer */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	ret = dw_spi_dma_wait(dws, xfer);
	if (ret)
		return ret;

	if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

static const struct dw_spi_dma_ops mfld_dma_ops = {
	.dma_init = mid_spi_dma_init_mfld,
	.dma_exit = mid_spi_dma_exit,
	.dma_setup = mid_spi_dma_setup,
	.can_dma = mid_spi_can_dma,
	.dma_transfer = mid_spi_dma_transfer,
	.dma_stop = mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &mfld_dma_ops;
}

static const struct dw_spi_dma_ops generic_dma_ops = {
	.dma_init = mid_spi_dma_init_generic,
	.dma_exit = mid_spi_dma_exit,
	.dma_setup = mid_spi_dma_setup,
	.can_dma = mid_spi_can_dma,
	.dma_transfer = mid_spi_dma_transfer,
	.dma_stop = mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_generic(struct dw_spi *dws)
{
	dws->dma_ops = &generic_dma_ops;
}
#else /* CONFIG_SPI_DW_MID_DMA */
static inline void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws) {}
static inline void dw_spi_mid_setup_dma_generic(struct dw_spi *dws) {}
#endif

/* Some specific info for the SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, one 32b reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

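/*
 * On Moorestown/Medfield the Clock Control Unit exposes one 32-bit word per
 * SPI controller. Read the CDIV field from the word belonging to this bus
 * and derive the controller input clock as 100 MHz / (cdiv + 1).
 */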
int dw_spi_mid_init_mfld(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_mid_setup_dma_mfld(dws);
	return 0;
}

int dw_spi_mid_init_generic(struct dw_spi *dws)
{
	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_mid_setup_dma_generic(dws);
	return 0;
}