/*
 * drivers/spi/spi-fsl-dspi.c
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * Freescale DSPI driver
 * This file contains a driver for the Freescale DSPI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>

#define DRIVER_NAME "fsl-dspi"

#define DSPI_FIFO_SIZE		4
#define DSPI_DMA_BUFSIZE	(DSPI_FIFO_SIZE * 1024)

#define SPI_MCR			0x00
#define SPI_MCR_MASTER		(1 << 31)
#define SPI_MCR_PCSIS		(0x3F << 16)
#define SPI_MCR_CLR_TXF		(1 << 11)
#define SPI_MCR_CLR_RXF		(1 << 10)
#define SPI_MCR_XSPI		(1 << 3)

#define SPI_TCR			0x08
#define SPI_TCR_GET_TCNT(x)	(((x) & 0xffff0000) >> 16)

#define SPI_CTAR(x)		(0x0c + (((x) & 0x3) * 4))
#define SPI_CTAR_FMSZ(x)	(((x) & 0x0000000f) << 27)
#define SPI_CTAR_CPOL(x)	((x) << 26)
#define SPI_CTAR_CPHA(x)	((x) << 25)
#define SPI_CTAR_LSBFE(x)	((x) << 24)
#define SPI_CTAR_PCSSCK(x)	(((x) & 0x00000003) << 22)
#define SPI_CTAR_PASC(x)	(((x) & 0x00000003) << 20)
#define SPI_CTAR_PDT(x)		(((x) & 0x00000003) << 18)
#define SPI_CTAR_PBR(x)		(((x) & 0x00000003) << 16)
#define SPI_CTAR_CSSCK(x)	(((x) & 0x0000000f) << 12)
#define SPI_CTAR_ASC(x)		(((x) & 0x0000000f) << 8)
#define SPI_CTAR_DT(x)		(((x) & 0x0000000f) << 4)
#define SPI_CTAR_BR(x)		((x) & 0x0000000f)
#define SPI_CTAR_SCALE_BITS	0xf

#define SPI_CTAR0_SLAVE		0x0c

#define SPI_SR			0x2c
#define SPI_SR_EOQF		0x10000000
#define SPI_SR_TCFQF		0x80000000
#define SPI_SR_CLEAR		0xdaad0000

#define SPI_RSER_TFFFE		BIT(25)
#define SPI_RSER_TFFFD		BIT(24)
#define SPI_RSER_RFDFE		BIT(17)
#define SPI_RSER_RFDFD		BIT(16)

#define SPI_RSER		0x30
#define SPI_RSER_EOQFE		0x10000000
#define SPI_RSER_TCFQE		0x80000000

#define SPI_PUSHR		0x34
#define SPI_PUSHR_CMD_CONT	(1 << 15)
#define SPI_PUSHR_CONT		(SPI_PUSHR_CMD_CONT << 16)
#define SPI_PUSHR_CMD_CTAS(x)	(((x) & 0x0003) << 12)
#define SPI_PUSHR_CTAS(x)	(SPI_PUSHR_CMD_CTAS(x) << 16)
#define SPI_PUSHR_CMD_EOQ	(1 << 11)
#define SPI_PUSHR_EOQ		(SPI_PUSHR_CMD_EOQ << 16)
#define SPI_PUSHR_CMD_CTCNT	(1 << 10)
#define SPI_PUSHR_CTCNT		(SPI_PUSHR_CMD_CTCNT << 16)
#define SPI_PUSHR_CMD_PCS(x)	((1 << (x)) & 0x003f)
#define SPI_PUSHR_PCS(x)	(SPI_PUSHR_CMD_PCS(x) << 16)
#define SPI_PUSHR_TXDATA(x)	((x) & 0x0000ffff)

#define SPI_PUSHR_SLAVE		0x34

#define SPI_POPR		0x38
#define SPI_POPR_RXDATA(x)	((x) & 0x0000ffff)

#define SPI_TXFR0		0x3c
#define SPI_TXFR1		0x40
#define SPI_TXFR2		0x44
#define SPI_TXFR3		0x48
#define SPI_RXFR0		0x7c
#define SPI_RXFR1		0x80
#define SPI_RXFR2		0x84
#define SPI_RXFR3		0x88

#define SPI_CTARE(x)		(0x11c + (((x) & 0x3) * 4))
#define SPI_CTARE_FMSZE(x)	(((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x)	((x) & 0x7ff)

#define SPI_SREX		0x13c

#define SPI_FRAME_BITS(bits)	SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_BITS_MASK	SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_16	SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_8	SPI_CTAR_FMSZ(0x7)

#define SPI_FRAME_EBITS(bits)	SPI_CTARE_FMSZE(((bits) - 1) >> 4)
#define SPI_FRAME_EBITS_MASK	SPI_CTARE_FMSZE(1)

/* Register offsets for regmap_pushr */
#define PUSHR_CMD		0x0
#define PUSHR_TX		0x2

#define SPI_CS_INIT		0x01
#define SPI_CS_ASSERT		0x02
#define SPI_CS_DROP		0x04

#define DMA_COMPLETION_TIMEOUT	msecs_to_jiffies(3000)

struct chip_data {
	u32 ctar_val;
	u16 void_write_data;
};

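/*
 * A transfer can be driven in one of three ways, selected per SoC: EOQ
 * mode fills the TX FIFO and waits for the End-Of-Queue interrupt, TCFQ
 * mode pushes one frame at a time and takes an interrupt per frame, and
 * DMA mode pumps the FIFOs through a bounce buffer via the dmaengine API.
 */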
enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_TCFQ_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;
	u8 max_clock_factor;
	bool xspi_mode;
};

static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};

static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
	.xspi_mode = true,
};

static const struct fsl_dspi_devtype_data ls2085a_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

static const struct fsl_dspi_devtype_data coldfire_data = {
	.trans_mode = DSPI_EOQ_MODE,
	.max_clock_factor = 8,
};

struct fsl_dspi_dma {
	/* Length of transfer in words of DSPI_FIFO_SIZE */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};

struct fsl_dspi {
	struct spi_master *master;
	struct platform_device *pdev;

	struct regmap *regmap;
	struct regmap *regmap_pushr;
	int irq;
	struct clk *clk;

	struct spi_transfer *cur_transfer;
	struct spi_message *cur_msg;
	struct chip_data *cur_chip;
	size_t len;
	const void *tx;
	void *rx;
	void *rx_end;
	u16 void_write_data;
	u16 tx_cmd;
	u8 bits_per_word;
	u8 bytes_per_word;
	const struct fsl_dspi_devtype_data *devtype_data;

	wait_queue_head_t waitq;
	u32 waitflags;

	struct fsl_dspi_dma *dma;
};

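/*
 * Pop the next word to transmit from dspi->tx and push received words to
 * dspi->rx, using the 1-, 2- or 4-byte word size of the current transfer.
 */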
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 txdata = 0;

	if (dspi->tx) {
		if (dspi->bytes_per_word == 1)
			txdata = *(u8 *)dspi->tx;
		else if (dspi->bytes_per_word == 2)
			txdata = *(u16 *)dspi->tx;
		else /* dspi->bytes_per_word == 4 */
			txdata = *(u32 *)dspi->tx;
		dspi->tx += dspi->bytes_per_word;
	}
	dspi->len -= dspi->bytes_per_word;
	return txdata;
}

static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	return cmd << 16 | data;
}

static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	if (!dspi->rx)
		return;

	/* Mask off undefined bits */
	rxdata &= (1 << dspi->bits_per_word) - 1;

	if (dspi->bytes_per_word == 1)
		*(u8 *)dspi->rx = rxdata;
	else if (dspi->bytes_per_word == 2)
		*(u16 *)dspi->rx = rxdata;
	else /* dspi->bytes_per_word == 4 */
		*(u32 *)dspi->rx = rxdata;
	dspi->rx += dspi->bytes_per_word;
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;

	complete(&dma->cmd_tx_complete);
}

static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	int i;

	if (dspi->rx) {
		for (i = 0; i < dma->curr_xfer_len; i++)
			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
	}

	complete(&dma->cmd_rx_complete);
}

static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int time_left;
	int i;

	for (i = 0; i < dma->curr_xfer_len; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}

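/*
 * Split the remaining transfer into chunks that fit the DMA bounce
 * buffer and submit them one at a time until dspi->len is exhausted.
 */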
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int curr_remaining_bytes;
	int bytes_per_buffer;
	int ret = 0;

	curr_remaining_bytes = dspi->len;
	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
	while (curr_remaining_bytes) {
		/* Check if current transfer fits the DMA buffer */
		dma->curr_xfer_len = curr_remaining_bytes
			/ dspi->bytes_per_word;
		if (dma->curr_xfer_len > bytes_per_buffer)
			dma->curr_xfer_len = bytes_per_buffer;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			goto exit;

		} else {
			curr_remaining_bytes -= dma->curr_xfer_len
				* dspi->bytes_per_word;
			if (curr_remaining_bytes < 0)
				curr_remaining_bytes = 0;
		}
	}

exit:
	return ret;
}

static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	struct fsl_dspi_dma *dma;
	struct dma_slave_config cfg;
	struct device *dev = &dspi->pdev->dev;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_slave_channel(dev, "rx");
	if (!dma->chan_rx) {
		dev_err(dev, "rx dma channel not available\n");
		ret = -ENODEV;
		return ret;
	}

	dma->chan_tx = dma_request_slave_channel(dev, "tx");
	if (!dma->chan_tx) {
		dev_err(dev, "tx dma channel not available\n");
		ret = -ENODEV;
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					     &dma->tx_dma_phys, GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					     &dma->rx_dma_phys, GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			  dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			  dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

static void dspi_release_dma(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;

	if (dma) {
		if (dma->chan_tx) {
			dma_unmap_single(dev, dma->tx_dma_phys,
					 DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
			dma_release_channel(dma->chan_tx);
		}

		if (dma->chan_rx) {
			dma_unmap_single(dev, dma->rx_dma_phys,
					 DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
			dma_release_channel(dma->chan_rx);
		}
	}
}

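/*
 * Convert the requested SCK rate into the PBR/BR divider pair: pick the
 * smallest prescaler * scaler product that is at least clkrate / speed_hz,
 * so the resulting bus clock never exceeds the requested rate.
 */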
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate)
{
	/* Valid baud rate pre-scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = {	2,	4,	6,	8,
			16,	32,	64,	128,
			256,	512,	1024,	2048,
			4096,	8192,	16384,	32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			scale = brs[i] * pbr_tbl[j];
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find valid baud rate, speed_hz is %d, clkrate is %ld; using the max prescaler value\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br = ARRAY_SIZE(brs) - 1;
	}
}

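/*
 * Convert a delay in nanoseconds into the CTAR prescaler/scaler pair,
 * rounding up so the programmed delay is never shorter than requested.
 */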
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	int pscale_tbl[4] = {1, 3, 5, 7};
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;
	u32 remainder;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
				   &remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}

static void fifo_write(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}

static void cmd_fifo_write(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd;

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
}

static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
}

static void dspi_tcfq_write(struct fsl_dspi *dspi)
{
	/* Clear transfer count */
	dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;

	if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
		/* Write two TX FIFO entries first, and then the corresponding
		 * CMD FIFO entry.
		 */
		u32 data = dspi_pop_tx(dspi);

		if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
			/* LSB */
			tx_fifo_write(dspi, data & 0xFFFF);
			tx_fifo_write(dspi, data >> 16);
		} else {
			/* MSB */
			tx_fifo_write(dspi, data >> 16);
			tx_fifo_write(dspi, data & 0xFFFF);
		}
		cmd_fifo_write(dspi);
	} else {
		/* Write one entry to both TX FIFO and CMD FIFO
		 * simultaneously.
		 */
		fifo_write(dspi);
	}
}

static u32 fifo_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
	dspi_push_rx(dspi, fifo_read(dspi));
}

static void dspi_eoq_write(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Fill TX FIFO with as many transfers as possible */
	while (dspi->len && fifo_size--) {
		/* Request EOQF for last transfer in FIFO */
		if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* Clear transfer count for first transfer in FIFO */
		if (fifo_size == (DSPI_FIFO_SIZE - 1))
			dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
		/* Write combined TX FIFO and CMD FIFO entry */
		fifo_write(dspi);
	}
}

static void dspi_eoq_read(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Read one FIFO entry at a time and push it to the rx buffer */
	while ((dspi->rx < dspi->rx_end) && fifo_size--)
		dspi_push_rx(dspi, fifo_read(dspi));
}

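/*
 * Walk the transfers of a message, program CTAR0 for each one and start
 * the transfer in the mode selected by the devtype data (EOQ, TCFQ or
 * DMA). The non-DMA modes then sleep on dspi->waitq until the interrupt
 * handler reports completion.
 */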
static int dspi_transfer_one_message(struct spi_master *master,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;
	enum dspi_trans_mode trans_mode;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			       SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->void_write_data = dspi->cur_chip->void_write_data;

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->rx_end = dspi->rx + transfer->len;
		dspi->len = transfer->len;
		/* Validated transfer specific frame size (defaults applied) */
		dspi->bits_per_word = transfer->bits_per_word;
		if (transfer->bits_per_word <= 8)
			dspi->bytes_per_word = 1;
		else if (transfer->bits_per_word <= 16)
			dspi->bytes_per_word = 2;
		else
			dspi->bytes_per_word = 4;

		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
		regmap_write(dspi->regmap, SPI_CTAR(0),
			     dspi->cur_chip->ctar_val |
			     SPI_FRAME_BITS(transfer->bits_per_word));
		if (dspi->devtype_data->xspi_mode)
			regmap_write(dspi->regmap, SPI_CTARE(0),
				     SPI_FRAME_EBITS(transfer->bits_per_word)
				     | SPI_CTARE_DTCP(1));

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
			dspi_eoq_write(dspi);
			break;
		case DSPI_TCFQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
			dspi_tcfq_write(dspi);
			break;
		case DSPI_DMA_MODE:
			regmap_write(dspi->regmap, SPI_RSER,
				     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
				     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
			status = dspi_dma_xfer(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			status = -EINVAL;
			goto out;
		}

		if (trans_mode != DSPI_DMA_MODE) {
			if (wait_event_interruptible(dspi->waitq,
						     dspi->waitflags))
				dev_err(&dspi->pdev->dev,
					"waiting for transfer completion failed!\n");
			dspi->waitflags = 0;
		}

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}

out:
	message->status = status;
	spi_finalize_current_message(master);

	return status;
}

static int dspi_setup(struct spi_device *spi)
{
	struct chip_data *chip;
	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
	struct fsl_dspi_platform_data *pdata;
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	unsigned char pasc = 0, asc = 0;
	unsigned long clkrate;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				     &cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	chip->void_write_data = 0;

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
		| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
		| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
		| SPI_CTAR_PCSSCK(pcssck)
		| SPI_CTAR_CSSCK(cssck)
		| SPI_CTAR_PASC(pasc)
		| SPI_CTAR_ASC(asc)
		| SPI_CTAR_PBR(pbr)
		| SPI_CTAR_BR(br);

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
		spi->master->bus_num, spi->chip_select);

	kfree(chip);
}

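/*
 * EOQ/TCFQ interrupt handler: drain the RX FIFO, account for the words
 * reported by the transfer counter, and either refill the TX FIFO or
 * wake up dspi_transfer_one_message() once the whole transfer is done.
 */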
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	struct spi_message *msg = dspi->cur_msg;
	enum dspi_trans_mode trans_mode;
	u32 spi_sr, spi_tcr;
	u16 spi_tcnt;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
		/* Get transfer counter (in number of SPI transfers). It was
		 * reset to 0 when transfer(s) were started.
		 */
		regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
		spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
		/* Update total number of bytes that were transferred */
		msg->actual_length += spi_tcnt * dspi->bytes_per_word;

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			dspi_eoq_read(dspi);
			break;
		case DSPI_TCFQ_MODE:
			dspi_tcfq_read(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			return IRQ_HANDLED;
		}

		if (!dspi->len) {
			dspi->waitflags = 1;
			wake_up_interruptible(&dspi->waitq);
		} else {
			switch (trans_mode) {
			case DSPI_EOQ_MODE:
				dspi_eoq_write(dspi);
				break;
			case DSPI_TCFQ_MODE:
				dspi_tcfq_write(dspi);
				break;
			default:
				dev_err(&dspi->pdev->dev,
					"unsupported trans_mode %u\n",
					trans_mode);
			}
		}
	}

	return IRQ_HANDLED;
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
	{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
	{ .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	spi_master_suspend(master);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_master_resume(master);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges = dspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};

static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
	.volatile_table = &dspi_volatile_table,
};

static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges = dspi_xspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = 0x13c,
		.volatile_table = &dspi_xspi_volatile_table,
	},
	{
		.name = "pushr",
		.reg_bits = 16,
		.val_bits = 16,
		.reg_stride = 2,
		.max_register = 0x2,
	},
};

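/*
 * Basic controller setup: master mode with all chip selects inactive
 * high, status flags cleared, and extended (XSPI) mode enabled on parts
 * that support it.
 */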
static void dspi_init(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
		     (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
	if (dspi->devtype_data->xspi_mode)
		regmap_write(dspi->regmap, SPI_CTARE(0),
			     SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct fsl_dspi *dspi;
	struct resource *res;
	const struct regmap_config *regmap_config;
	void __iomem *base;
	struct fsl_dspi_platform_data *pdata;
	int ret = 0, cs_num, bus_num;

	master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
	if (!master)
		return -ENOMEM;

	dspi = spi_master_get_devdata(master);
	dspi->pdev = pdev;
	dspi->master = master;

	master->transfer = NULL;
	master->setup = dspi_setup;
	master->transfer_one_message = dspi_transfer_one_message;
	master->dev.of_node = pdev->dev.of_node;

	master->cleanup = dspi_cleanup;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		master->num_chipselect = pdata->cs_num;
		master->bus_num = pdata->bus_num;

		dspi->devtype_data = &coldfire_data;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_master_put;
		}
		master->num_chipselect = cs_num;

		ret = of_property_read_u32(np, "bus-num", &bus_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get bus-num\n");
			goto out_master_put;
		}
		master->bus_num = bus_num;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_master_put;
		}
	}

	if (dspi->devtype_data->xspi_mode)
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_master_put;
	}

	if (dspi->devtype_data->xspi_mode)
		regmap_config = &dspi_xspi_regmap_config[0];
	else
		regmap_config = &dspi_regmap_config;
	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_master_put;
	}

	if (dspi->devtype_data->xspi_mode) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_xspi_regmap_config[1]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_master_put;
		}
	}

	dspi_init(dspi);
	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq < 0) {
		dev_err(&pdev->dev, "can't get platform irq\n");
		ret = dspi->irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
			       pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_master_put;
	}

	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_master_put;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto out_master_put;

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_clk_put;
		}
	}

	master->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	init_waitqueue_head(&dspi->waitq);
	platform_set_drvdata(pdev, master);

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI master\n");
		goto out_clk_put;
	}

	return ret;

out_clk_put:
	clk_disable_unprepare(dspi->clk);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int dspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	/* Disconnect from the SPI framework */
	dspi_release_dma(dspi);
	clk_disable_unprepare(dspi->clk);
	spi_unregister_master(dspi->master);

	return 0;
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name = DRIVER_NAME,
	.driver.of_match_table = fsl_dspi_dt_ids,
	.driver.owner = THIS_MODULE,
	.driver.pm = &dspi_pm,
	.probe = dspi_probe,
	.remove = dspi_remove,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);