// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define RX_BUSY		0
#define TX_BUSY		1

static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };

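/*
 * dmaengine filter callback: accept only channels belonging to the DMA
 * controller found in mid_spi_dma_init_mfld() and hand it the slave data.
 */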
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

static int mid_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct pci_dev *dma_dev;
	struct dw_dma_slave *tx = dws->dma_tx;
	struct dw_dma_slave *rx = dws->dma_rx;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return -EBUSY;
}

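/*
 * Request the TX/RX channels by name through the generic dmaengine lookup,
 * typically from a DMA controller described by firmware for this device.
 */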
static int mid_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	dws->rxchan = dma_request_slave_channel(dev, "rx");
	if (!dws->rxchan)
		return -ENODEV;

	dws->txchan = dma_request_slave_channel(dev, "tx");
	if (!dws->txchan) {
		dma_release_channel(dws->rxchan);
		dws->rxchan = NULL;
		return -ENODEV;
	}

	dws->master->dma_rx = dws->rxchan;
	dws->master->dma_tx = dws->txchan;

	return 0;
}

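/* Stop any in-flight DMA, release both channels and disable controller DMA. */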
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

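/*
 * Interrupt handler used while DMA is active: only the FIFO error interrupts
 * are unmasked, so any pending status means the transfer has failed.
 */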
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
	return IRQ_HANDLED;
}

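/* Use DMA only for transfers that do not fit entirely into the FIFO. */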
static bool mid_spi_can_dma(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	return xfer->len > dws->fifo_len;
}

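/* Map the DMA transfer width in bytes to the dmaengine bus-width constant. */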
static enum dma_slave_buswidth convert_dma_width(u32 dma_width)
{
	if (dma_width == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (dma_width == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the TX channel clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	spi_finalize_current_transfer(dws->master);
}

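/*
 * Build a TX slave descriptor for the transfer's scatter-gather list;
 * returns NULL when the transfer has no TX buffer or preparation fails.
 */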
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->dma_width);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the RX channel clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;

	dw_writel(dws, DW_SPI_DMACR, 0);
	spi_finalize_current_transfer(dws->master);
}

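/*
 * Build an RX slave descriptor for the transfer's scatter-gather list;
 * returns NULL when the transfer has no RX buffer or preparation fails.
 */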
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->dma_width);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

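/*
 * Program the FIFO thresholds at which the controller raises DMA requests
 * and enable TX/RX DMA handshaking for this transfer.
 */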
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);

	dws->transfer_handler = dma_transfer;

	return 0;
}

static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/* RX must be started before TX due to the full-duplex nature of SPI */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}

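/* Abort the channels driving the current transfer and disable controller DMA. */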
static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}

	dw_writel(dws, DW_SPI_DMACR, 0);
}

static const struct dw_spi_dma_ops mfld_dma_ops = {
	.dma_init	= mid_spi_dma_init_mfld,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws)
{
	dws->dma_tx = &mid_dma_tx;
	dws->dma_rx = &mid_dma_rx;
	dws->dma_ops = &mfld_dma_ops;
}

static const struct dw_spi_dma_ops generic_dma_ops = {
	.dma_init	= mid_spi_dma_init_generic,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};

static void dw_spi_mid_setup_dma_generic(struct dw_spi *dws)
{
	dws->dma_ops = &generic_dma_ops;
}
#else /* CONFIG_SPI_DW_MID_DMA */
static inline void dw_spi_mid_setup_dma_mfld(struct dw_spi *dws) {}
static inline void dw_spi_mid_setup_dma_generic(struct dw_spi *dws) {}
#endif

/* Some specific info for the SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, one 32b reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

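/*
 * Derive the controller's maximum frequency from the clock divider in the
 * MRST clock control unit, then set up the Medfield-specific DMA support.
 */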
int dw_spi_mid_init_mfld(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_mid_setup_dma_mfld(dws);
	return 0;
}

int dw_spi_mid_init_generic(struct dw_spi *dws)
{
	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_mid_setup_dma_generic(dws);
	return 0;
}