// SPDX-License-Identifier: GPL-2.0+
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
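/*
 * Since the parameter above is registered with mode 0644, it can typically
 * also be toggled at run time, e.g. via
 * /sys/module/spi_imx/parameters/use_dma, or forced off at boot with
 * spi_imx.use_dma=0 on the kernel command line.
 */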

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
#define MXC_INT_RDR	BIT(4) /* Receive data threshold interrupt */

/* The maximum bytes that an SDMA BD can transfer. */
#define MAX_SDMA_BD_BYTES	(1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST	512
/* The maximum bytes that IMX53_ECSPI can transfer in slave mode. */
#define MX53_MAX_TRANSFER_BYTES		512

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
	int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *,
				struct spi_transfer *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	void (*setup_wml)(struct spi_imx_data *);
	void (*disable)(struct spi_imx_data *);
	void (*disable_dma)(struct spi_imx_data *);
	bool has_dmamode;
	bool has_slavemode;
	unsigned int fifo_size;
	bool dynamic_burst;
	enum spi_imx_devtype devtype;
};

struct spi_imx_data {
	struct spi_bitbang bitbang;
	struct device *dev;

	struct completion xfer_done;
	void __iomem *base;
	unsigned long base_phys;

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;
	unsigned int spi_bus_clk;

	unsigned int bits_per_word;
	unsigned int spi_drctl;

	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;

	/* Slave mode */
	bool slave_mode;
	bool slave_aborted;
	unsigned int slave_burst;

	/* DMA */
	bool usedma;
	u32 wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX53_ECSPI;
}

#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)	\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)	\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];
	return i;
}
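/*
 * Worked example for the divider search above: with fin = 48 MHz and a
 * requested fspi = 1 MHz, the loop stops at the first table entry whose
 * divider is large enough, mxc_clkdivs[9] = 48, so the function returns 9
 * and *fres = 48 MHz / 48 = 1 MHz.
 */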

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			goto out;
		div <<= 1;
	}

out:
	*fres = fin / div;
	return i;
}
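/*
 * Worked example for the power-of-two divider above: with fin = 48 MHz and
 * a requested fspi = 8 MHz, div = 4 still gives 12 MHz, which is too fast,
 * so the loop settles on div = 8 and the function returns i = 1 with
 * *fres = 48 MHz / 8 = 6 MHz.
 */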

static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word <= 8)
		return 1;
	else if (bits_per_word <= 16)
		return 2;
	else
		return 4;
}

static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (!use_dma || master->fallback)
		return false;

	if (!master->dma_rx)
		return false;

	if (spi_imx->slave_mode)
		return false;

	if (transfer->len < spi_imx->devtype_data->fifo_size)
		return false;

	spi_imx->dynamic_burst = 0;

	return true;
}

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20
#define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 << 0)
#define MX51_ECSPI_INT_RREN		(1 << 3)
#define MX51_ECSPI_INT_RDREN		(1 << 4)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)

#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 << 3)

#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)

static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			val = cpu_to_be32(val);
		else if (bytes_per_word == 2)
			val = (val << 16) | (val >> 16);
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}

static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}

static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		val = cpu_to_be32(val);
	else if (bytes_per_word == 2)
		val = (val << 16) | (val >> 16);
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->slave_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->slave_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}

static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
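/*
 * Worked example for the divider calculation above: with a 60 MHz reference
 * clock and a requested 10 MHz SCLK, fls() gives post = 2, the following
 * check bumps it to 3, and post = max(4, 3) - 4 = 0; then
 * pre = DIV_ROUND_UP(60 MHz, 10 MHz << 0) - 1 = 5, so the resulting rate is
 * (60 MHz / (5 + 1)) >> 0 = 10 MHz, exactly the requested frequency.
 */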

static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	if (enable & MXC_INT_RDR)
		val |= MX51_ECSPI_INT_RDREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg;

	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	reg |= MX51_ECSPI_CTRL_XCH;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static void mx51_disable_dma(struct spi_imx_data *spi_imx)
{
	writel(0, spi_imx->base + MX51_ECSPI_DMA);
}

static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}

static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 testreg;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);

	/* set master or slave mode */
	if (spi_imx->slave_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);

	/*
	 * The ctrl register must be written first, with the EN bit set;
	 * the other registers must not be written to before that.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by the chip select signal in slave mode
	 * is not functional on the i.MX53 SoC, so configure the SPI burst
	 * to complete when BURST_LENGTH + 1 bits have been received.
	 */
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	return 0;
}

static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi,
				       struct spi_transfer *t)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk = t->speed_hz, delay;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		ctrl |= (spi_imx->slave_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else
		ctrl |= (spi_imx->bits_per_word - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, t->speed_hz, &clk);
	spi_imx->spi_bus_clk = clk;

	if (spi_imx->usedma)
		ctrl |= MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we wait two SCLK ticks just to be sure. The
	 * time it takes for the hardware to apply the changes is
	 * noticeable if the SCLK clock runs very slowly. In such a case,
	 * if the polarity of SCLK should be inverted, the GPIO chip select
	 * might be asserted before the SCLK polarity changes, which would
	 * disrupt the SPI communication as the device on the other end
	 * would consider the change of SCLK polarity as a clock tick
	 * already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}

static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	/*
	 * Configure the DMA register: set up the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}

static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_SMC	(1 << 3)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPI_DMAREG	0x10
#define MX31_DMAREG_RH_DEN	(1 << 4)
#define MX31_DMAREG_TH_DEN	(1 << 1)

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

#define MX31_CSPI_TESTREG	0x1C
#define MX31_TEST_LBC		(1 << 14)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}

static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (!gpio_is_valid(spi->cs_gpio))
		reg |= (spi->chip_select) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}

static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}

static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, t->speed_hz, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (!gpio_is_valid(spi->cs_gpio))
		reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}

static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi,
				struct spi_transfer *t)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* the i.MX27 CSPI shares its functions with the i.MX21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* the i.MX35 and later CSPI share their functions with the i.MX31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.disable_dma = mx51_disable_dma,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};

static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.disable_dma = mx51_disable_dma,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};

static const struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		.name = "imx53-ecspi",
		.driver_data = (kernel_ulong_t) &imx53_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (spi->mode & SPI_NO_CS)
		return;

	if (!gpio_is_valid(spi->cs_gpio))
		return;

	gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
}

static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}

static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int burst_len, fifo_words;

	if (spi_imx->dynamic_burst)
		fifo_words = 4;
	else
		fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
	/*
	 * Reload the FIFO when the number of bytes remaining in the
	 * current burst is 0. This only applies when bits_per_word is a
	 * multiple of 8.
	 */
	if (!spi_imx->remainder) {
		if (spi_imx->dynamic_burst) {

			/* We need to deal with unaligned data first */
			burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

			if (!burst_len)
				burst_len = MX51_ECSPI_CTRL_MAX_BURST;

			spi_imx_set_burst_len(spi_imx, burst_len * 8);

			spi_imx->remainder = burst_len;
		} else {
			spi_imx->remainder = fifo_words;
		}
	}

	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
		if (!spi_imx->count)
			break;
		if (spi_imx->dynamic_burst &&
		    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
						    fifo_words))
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	if (!spi_imx->slave_mode)
		spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->txfifo &&
	       spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}

static int spi_imx_dma_configure(struct spi_master *master)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_tx, &tx);
	if (ret) {
		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_rx, &rx);
	if (ret) {
		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
		return ret;
	}

	return 0;
}

static int spi_imx_setupxfer(struct spi_device *spi,
			     struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (!t)
		return 0;

	spi_imx->bits_per_word = t->bits_per_word;

	/*
	 * Initialize the functions for transfer. To transfer non-byte-aligned
	 * words, we have to use multiple word-size bursts; we can't use
	 * dynamic_burst in that case.
	 */
	if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
	    (spi_imx->bits_per_word == 8 ||
	     spi_imx->bits_per_word == 16 ||
	     spi_imx->bits_per_word == 32)) {

		spi_imx->rx = spi_imx_buf_rx_swap;
		spi_imx->tx = spi_imx_buf_tx_swap;
		spi_imx->dynamic_burst = 1;

	} else {
		if (spi_imx->bits_per_word <= 8) {
			spi_imx->rx = spi_imx_buf_rx_u8;
			spi_imx->tx = spi_imx_buf_tx_u8;
		} else if (spi_imx->bits_per_word <= 16) {
			spi_imx->rx = spi_imx_buf_rx_u16;
			spi_imx->tx = spi_imx_buf_tx_u16;
		} else {
			spi_imx->rx = spi_imx_buf_rx_u32;
			spi_imx->tx = spi_imx_buf_tx_u32;
		}
		spi_imx->dynamic_burst = 0;
	}

	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
		spi_imx->usedma = true;
	else
		spi_imx->usedma = false;

	if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
		spi_imx->rx = mx53_ecspi_rx_slave;
		spi_imx->tx = mx53_ecspi_tx_slave;
		spi_imx->slave_burst = t->len;
	}

	spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);

	return 0;
}

Robin Gongf62cacc2014-09-11 09:18:44 +08001256static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
1257{
1258 struct spi_master *master = spi_imx->bitbang.master;
1259
1260 if (master->dma_rx) {
1261 dma_release_channel(master->dma_rx);
1262 master->dma_rx = NULL;
1263 }
1264
1265 if (master->dma_tx) {
1266 dma_release_channel(master->dma_tx);
1267 master->dma_tx = NULL;
1268 }
Robin Gongf62cacc2014-09-11 09:18:44 +08001269}
1270
1271static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
Anton Bondarenkof12ae172016-02-24 09:20:29 +01001272 struct spi_master *master)
Robin Gongf62cacc2014-09-11 09:18:44 +08001273{
Robin Gongf62cacc2014-09-11 09:18:44 +08001274 int ret;
1275
Robin Gonga02bb402015-02-03 10:25:53 +08001276	/* Use PIO mode on i.MX6DL because of hardware issue TKT238285 */
1277 if (of_machine_is_compatible("fsl,imx6dl"))
1278 return 0;
1279
jiada wangfd8d4e22017-06-08 14:16:00 +09001280 spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
Anton Bondarenko0dfbaa82015-12-05 17:57:01 +01001281
Robin Gongf62cacc2014-09-11 09:18:44 +08001282 /* Prepare for TX DMA: */
Peter Ujfalusi5d3aa9c2019-11-13 11:42:51 +02001283 master->dma_tx = dma_request_chan(dev, "tx");
Anton Bondarenko37600472015-12-08 07:43:45 +01001284 if (IS_ERR(master->dma_tx)) {
1285 ret = PTR_ERR(master->dma_tx);
1286 dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
1287 master->dma_tx = NULL;
Robin Gongf62cacc2014-09-11 09:18:44 +08001288 goto err;
1289 }
1290
Robin Gongf62cacc2014-09-11 09:18:44 +08001291	/* Prepare for RX DMA: */
Peter Ujfalusi5d3aa9c2019-11-13 11:42:51 +02001292 master->dma_rx = dma_request_chan(dev, "rx");
Anton Bondarenko37600472015-12-08 07:43:45 +01001293 if (IS_ERR(master->dma_rx)) {
1294 ret = PTR_ERR(master->dma_rx);
1295 dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
1296 master->dma_rx = NULL;
Robin Gongf62cacc2014-09-11 09:18:44 +08001297 goto err;
1298 }
1299
Robin Gongf62cacc2014-09-11 09:18:44 +08001300 init_completion(&spi_imx->dma_rx_completion);
1301 init_completion(&spi_imx->dma_tx_completion);
1302 master->can_dma = spi_imx_can_dma;
1303 master->max_dma_len = MAX_SDMA_BD_BYTES;
1304 spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
1305 SPI_MASTER_MUST_TX;
Robin Gongf62cacc2014-09-11 09:18:44 +08001306
1307 return 0;
1308err:
1309 spi_imx_sdma_exit(spi_imx);
1310 return ret;
1311}
1312
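/*
 * The "tx"/"rx" names passed to dma_request_chan() above are resolved via
 * the controller's dma-names property (or a platform dma_slave_map). An
 * -EPROBE_DEFER from either request makes spi_imx_probe() defer so that
 * probing is retried once the DMA controller is available; any other
 * failure is only reported there and the driver falls back to PIO.
 */
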
1313static void spi_imx_dma_rx_callback(void *cookie)
1314{
1315 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1316
1317 complete(&spi_imx->dma_rx_completion);
1318}
1319
1320static void spi_imx_dma_tx_callback(void *cookie)
1321{
1322 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1323
1324 complete(&spi_imx->dma_tx_completion);
1325}
1326
Anton Bondarenko4bfe9272016-02-19 08:43:03 +01001327static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
1328{
1329 unsigned long timeout = 0;
1330
1331	/* Account for the actual data transfer time plus the HW-related CS change delay */
1332 timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
1333
1334	/* Add an extra second for scheduler-related activities */
1335 timeout += 1;
1336
1337 /* Double calculated timeout */
1338 return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
1339}
1340
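/*
 * Worked example for the timeout above (illustrative numbers): a 4096 byte
 * transfer on a 10 MHz bus gives (8 + 4) * 4096 / 10000000 = 0 seconds of
 * data time with integer math, plus the 1 second scheduler margin; doubled
 * and converted, that is msecs_to_jiffies(2000), i.e. a 2 second timeout.
 */
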
Robin Gongf62cacc2014-09-11 09:18:44 +08001341static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1342 struct spi_transfer *transfer)
1343{
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001344 struct dma_async_tx_descriptor *desc_tx, *desc_rx;
Anton Bondarenko4bfe9272016-02-19 08:43:03 +01001345 unsigned long transfer_timeout;
Nicholas Mc Guire56536a72015-02-02 03:30:35 -05001346 unsigned long timeout;
Robin Gongf62cacc2014-09-11 09:18:44 +08001347 struct spi_master *master = spi_imx->bitbang.master;
1348 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
Robin Gong5ba5a372018-10-10 10:32:45 +00001349 struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
1350 unsigned int bytes_per_word, i;
Robin Gong987a2df2018-10-10 10:32:42 +00001351 int ret;
1352
Robin Gong5ba5a372018-10-10 10:32:45 +00001353	/* Pick a burst length that evenly divides the last sg entry so no tail data is left over */
1354 bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
1355 for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
1356 if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
1357 break;
1358 }
1359	/* Fall back to a wml of 1 if no suitable burst length was found */
1360 if (i == 0)
1361 i = 1;
1362
1363 spi_imx->wml = i;
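	/*
	 * Example of the loop above (illustrative numbers): with a 64-word
	 * FIFO the search starts at a wml of 32; for 8-bit words and a last
	 * sg entry of 3000 bytes, 32 and 31 do not divide 3000 evenly but
	 * 30 does, so 30 words per burst is used.
	 */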
1364
Robin Gong987a2df2018-10-10 10:32:42 +00001365 ret = spi_imx_dma_configure(master);
1366 if (ret)
Robin Gong7a908832020-06-17 06:42:09 +08001367 goto dma_failure_no_start;
Robin Gong987a2df2018-10-10 10:32:42 +00001368
Robin Gong5ba5a372018-10-10 10:32:45 +00001369 if (!spi_imx->devtype_data->setup_wml) {
1370 dev_err(spi_imx->dev, "No setup_wml()?\n");
Robin Gong7a908832020-06-17 06:42:09 +08001371 ret = -EINVAL;
1372 goto dma_failure_no_start;
Robin Gong5ba5a372018-10-10 10:32:45 +00001373 }
Robin Gong987a2df2018-10-10 10:32:42 +00001374 spi_imx->devtype_data->setup_wml(spi_imx);
Robin Gongf62cacc2014-09-11 09:18:44 +08001375
Anton Bondarenkofab44ef2015-12-05 17:57:00 +01001376 /*
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001377 * The TX DMA setup starts the transfer, so make sure RX is configured
1378 * before TX.
Anton Bondarenkofab44ef2015-12-05 17:57:00 +01001379 */
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001380 desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
1381 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
1382 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
Robin Gong7a908832020-06-17 06:42:09 +08001383 if (!desc_rx) {
1384 ret = -EINVAL;
1385 goto dma_failure_no_start;
1386 }
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001387
1388 desc_rx->callback = spi_imx_dma_rx_callback;
1389 desc_rx->callback_param = (void *)spi_imx;
1390 dmaengine_submit(desc_rx);
1391 reinit_completion(&spi_imx->dma_rx_completion);
Anton Bondarenkofab44ef2015-12-05 17:57:00 +01001392 dma_async_issue_pending(master->dma_rx);
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001393
1394 desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
1395 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
1396 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1397 if (!desc_tx) {
1398 dmaengine_terminate_all(master->dma_tx);
Robin Gongbcd8e772020-05-21 04:34:17 +08001399 dmaengine_terminate_all(master->dma_rx);
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001400 return -EINVAL;
1401 }
1402
1403 desc_tx->callback = spi_imx_dma_tx_callback;
1404 desc_tx->callback_param = (void *)spi_imx;
1405 dmaengine_submit(desc_tx);
1406 reinit_completion(&spi_imx->dma_tx_completion);
Anton Bondarenkofab44ef2015-12-05 17:57:00 +01001407 dma_async_issue_pending(master->dma_tx);
Robin Gongf62cacc2014-09-11 09:18:44 +08001408
Anton Bondarenko4bfe9272016-02-19 08:43:03 +01001409 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
1410
Robin Gongf62cacc2014-09-11 09:18:44 +08001411	/* Wait for SDMA to finish the data transfer. */
Nicholas Mc Guire56536a72015-02-02 03:30:35 -05001412 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
Anton Bondarenko4bfe9272016-02-19 08:43:03 +01001413 transfer_timeout);
Nicholas Mc Guire56536a72015-02-02 03:30:35 -05001414 if (!timeout) {
Sascha Hauer6aa800c2016-02-17 14:28:48 +01001415 dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
Robin Gongf62cacc2014-09-11 09:18:44 +08001416 dmaengine_terminate_all(master->dma_tx);
Anton Bondarenkoe47b33c2015-12-05 17:56:59 +01001417 dmaengine_terminate_all(master->dma_rx);
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001418 return -ETIMEDOUT;
Robin Gongf62cacc2014-09-11 09:18:44 +08001419 }
1420
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001421 timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
1422 transfer_timeout);
1423 if (!timeout) {
1424 dev_err(&master->dev, "I/O Error in DMA RX\n");
1425 spi_imx->devtype_data->reset(spi_imx);
1426 dmaengine_terminate_all(master->dma_rx);
1427 return -ETIMEDOUT;
1428 }
Robin Gongf62cacc2014-09-11 09:18:44 +08001429
Sascha Hauer6b6192c2016-02-24 09:20:33 +01001430 return transfer->len;
Robin Gong7a908832020-06-17 06:42:09 +08001431/* Fall back to PIO; the DMA transfer was never started */
1432dma_failure_no_start:
1433 transfer->error |= SPI_TRANS_FAIL_NO_START;
1434 return ret;
Robin Gongf62cacc2014-09-11 09:18:44 +08001435}
1436
1437static int spi_imx_pio_transfer(struct spi_device *spi,
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001438 struct spi_transfer *transfer)
1439{
1440 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
Christian Gmeinerff1ba3d2016-06-21 14:12:54 +02001441 unsigned long transfer_timeout;
1442 unsigned long timeout;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001443
1444 spi_imx->tx_buf = transfer->tx_buf;
1445 spi_imx->rx_buf = transfer->rx_buf;
1446 spi_imx->count = transfer->len;
1447 spi_imx->txfifo = 0;
Maxime Chevallier2ca300a2018-07-17 16:31:54 +02001448 spi_imx->remainder = 0;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001449
Axel Linaa0fe822014-02-09 11:06:04 +08001450 reinit_completion(&spi_imx->xfer_done);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001451
1452 spi_imx_push(spi_imx);
1453
Shawn Guoedd501bb2011-07-10 01:16:35 +08001454 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001455
Christian Gmeinerff1ba3d2016-06-21 14:12:54 +02001456 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
1457
1458 timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
1459 transfer_timeout);
1460 if (!timeout) {
1461 dev_err(&spi->dev, "I/O Error in PIO\n");
1462 spi_imx->devtype_data->reset(spi_imx);
1463 return -ETIMEDOUT;
1464 }
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001465
1466 return transfer->len;
1467}
1468
jiada wang71abd292017-09-05 14:12:32 +09001469static int spi_imx_pio_transfer_slave(struct spi_device *spi,
1470 struct spi_transfer *transfer)
1471{
1472 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1473 int ret = transfer->len;
1474
1475 if (is_imx53_ecspi(spi_imx) &&
1476 transfer->len > MX53_MAX_TRANSFER_BYTES) {
1477 dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
1478 MX53_MAX_TRANSFER_BYTES);
1479 return -EMSGSIZE;
1480 }
1481
1482 spi_imx->tx_buf = transfer->tx_buf;
1483 spi_imx->rx_buf = transfer->rx_buf;
1484 spi_imx->count = transfer->len;
1485 spi_imx->txfifo = 0;
Maxime Chevallier2ca300a2018-07-17 16:31:54 +02001486 spi_imx->remainder = 0;
jiada wang71abd292017-09-05 14:12:32 +09001487
1488 reinit_completion(&spi_imx->xfer_done);
1489 spi_imx->slave_aborted = false;
1490
1491 spi_imx_push(spi_imx);
1492
1493 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
1494
1495 if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
1496 spi_imx->slave_aborted) {
1497 dev_dbg(&spi->dev, "interrupted\n");
1498 ret = -EINTR;
1499 }
1500
1501	/* ECSPI has a HW issue when working in Slave mode:
1502	 * after 64 words have been written to the TXFIFO, ECSPI_TXDATA
1503	 * keeps shifting out the last word even once the TXFIFO is empty,
1504	 * so we have to disable ECSPI when in slave mode after the
1505	 * transfer completes.
1506	 */
1507 if (spi_imx->devtype_data->disable)
1508 spi_imx->devtype_data->disable(spi_imx);
1509
1510 return ret;
1511}
1512
Robin Gongf62cacc2014-09-11 09:18:44 +08001513static int spi_imx_transfer(struct spi_device *spi,
1514 struct spi_transfer *transfer)
1515{
Robin Gongf62cacc2014-09-11 09:18:44 +08001516 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1517
jiada wang71abd292017-09-05 14:12:32 +09001518	/* Flush any stale data out of the RX FIFO before the transfer */
1519 while (spi_imx->devtype_data->rx_available(spi_imx))
Trent Piephoc8427492019-03-04 20:18:49 +00001520 readl(spi_imx->base + MXC_CSPIRXDATA);
jiada wang71abd292017-09-05 14:12:32 +09001521
1522 if (spi_imx->slave_mode)
1523 return spi_imx_pio_transfer_slave(spi, transfer);
1524
Robin Gong7a908832020-06-17 06:42:09 +08001525 if (spi_imx->usedma)
1526 return spi_imx_dma_transfer(spi_imx, transfer);
Robin Gongbcd8e772020-05-21 04:34:17 +08001527
1528 return spi_imx_pio_transfer(spi, transfer);
Robin Gongf62cacc2014-09-11 09:18:44 +08001529}
1530
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001531static int spi_imx_setup(struct spi_device *spi)
1532{
Alberto Panizzof4d4ecf2010-01-20 13:49:45 -07001533 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001534 spi->mode, spi->bits_per_word, spi->max_speed_hz);
1535
Oleksij Rempelab2f3572017-07-25 09:57:09 +02001536 if (spi->mode & SPI_NO_CS)
1537 return 0;
1538
Alexander Shiyanb36581d2016-06-08 20:02:06 +03001539 if (gpio_is_valid(spi->cs_gpio))
1540 gpio_direction_output(spi->cs_gpio,
1541 spi->mode & SPI_CS_HIGH ? 0 : 1);
Sascha Hauer6c23e5d2009-10-01 15:44:29 -07001542
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001543 spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
1544
1545 return 0;
1546}
1547
1548static void spi_imx_cleanup(struct spi_device *spi)
1549{
1550}
1551
Huang Shijie9e556dc2013-10-23 16:31:50 +08001552static int
1553spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
1554{
1555 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1556 int ret;
1557
1558 ret = clk_enable(spi_imx->clk_per);
1559 if (ret)
1560 return ret;
1561
1562 ret = clk_enable(spi_imx->clk_ipg);
1563 if (ret) {
1564 clk_disable(spi_imx->clk_per);
1565 return ret;
1566 }
1567
Uwe Kleine-Könige6972712018-11-30 07:47:05 +01001568 ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
1569 if (ret) {
1570 clk_disable(spi_imx->clk_ipg);
1571 clk_disable(spi_imx->clk_per);
1572 }
1573
1574 return ret;
Huang Shijie9e556dc2013-10-23 16:31:50 +08001575}
1576
1577static int
1578spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
1579{
1580 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1581
1582 clk_disable(spi_imx->clk_ipg);
1583 clk_disable(spi_imx->clk_per);
1584 return 0;
1585}
1586
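/*
 * Clock handling note: the per and ipg clocks are prepared once in
 * spi_imx_probe() (which leaves them prepared but disabled), so the
 * prepare_message()/unprepare_message() pair above only toggles the
 * enable count around each message; spi_imx_remove() finally drops the
 * prepare count with clk_disable_unprepare().
 */
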
jiada wang71abd292017-09-05 14:12:32 +09001587static int spi_imx_slave_abort(struct spi_master *master)
1588{
1589 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1590
1591 spi_imx->slave_aborted = true;
1592 complete(&spi_imx->xfer_done);
1593
1594 return 0;
1595}
1596
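/*
 * Illustrative sketch only (not from this driver): a slave protocol
 * handler typically queues its response with spi_async() and, on
 * teardown, calls spi_slave_abort() so that the interruptible wait in
 * spi_imx_pio_transfer_slave() returns instead of blocking forever.
 * The function names below are made up.
 *
 *	static int example_queue(struct spi_device *spi, struct spi_message *msg,
 *				 struct spi_transfer *xfer)
 *	{
 *		spi_message_init_with_transfers(msg, xfer, 1);
 *		return spi_async(spi, msg);
 *	}
 *
 *	static void example_teardown(struct spi_device *spi)
 *	{
 *		spi_slave_abort(spi);
 *	}
 */
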
Grant Likelyfd4a3192012-12-07 16:57:14 +00001597static int spi_imx_probe(struct platform_device *pdev)
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001598{
Shawn Guo22a85e42011-07-10 01:16:41 +08001599 struct device_node *np = pdev->dev.of_node;
1600 const struct of_device_id *of_id =
1601 of_match_device(spi_imx_dt_ids, &pdev->dev);
1602 struct spi_imx_master *mxc_platform_info =
1603 dev_get_platdata(&pdev->dev);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001604 struct spi_master *master;
1605 struct spi_imx_data *spi_imx;
1606 struct resource *res;
Leif Middelschultef72efa72017-04-23 21:19:58 +02001607 int i, ret, irq, spi_drctl;
jiada wang71abd292017-09-05 14:12:32 +09001608 const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
1609 (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
1610 bool slave_mode;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001611
Shawn Guo22a85e42011-07-10 01:16:41 +08001612 if (!np && !mxc_platform_info) {
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001613 dev_err(&pdev->dev, "can't get the platform data\n");
1614 return -EINVAL;
1615 }
1616
jiada wang71abd292017-09-05 14:12:32 +09001617 slave_mode = devtype_data->has_slavemode &&
1618 of_property_read_bool(np, "spi-slave");
1619 if (slave_mode)
1620 master = spi_alloc_slave(&pdev->dev,
1621 sizeof(struct spi_imx_data));
1622 else
1623 master = spi_alloc_master(&pdev->dev,
1624 sizeof(struct spi_imx_data));
Fabio Estevam2c147772017-06-20 13:50:55 -03001625 if (!master)
1626 return -ENOMEM;
1627
Leif Middelschultef72efa72017-04-23 21:19:58 +02001628 ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
1629 if ((ret < 0) || (spi_drctl >= 0x3)) {
1630 /* '11' is reserved */
1631 spi_drctl = 0;
1632 }
1633
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001634 platform_set_drvdata(pdev, master);
1635
Stephen Warren24778be2013-05-21 20:36:35 -06001636 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
Alexander Shiyanb36581d2016-06-08 20:02:06 +03001637 master->bus_num = np ? -1 : pdev->id;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001638
1639 spi_imx = spi_master_get_devdata(master);
Axel Lin94c69f72013-09-10 15:43:41 +08001640 spi_imx->bitbang.master = master;
Sascha Hauer6aa800c2016-02-17 14:28:48 +01001641 spi_imx->dev = &pdev->dev;
jiada wang71abd292017-09-05 14:12:32 +09001642 spi_imx->slave_mode = slave_mode;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001643
jiada wang71abd292017-09-05 14:12:32 +09001644 spi_imx->devtype_data = devtype_data;
Anton Bondarenko4686d1c2015-12-08 07:43:44 +01001645
Trent Piepho881a0b92017-10-31 12:49:04 -07001646	/* Get the number of chip selects, from either platform data or OF */
Alexander Shiyanb36581d2016-06-08 20:02:06 +03001647 if (mxc_platform_info) {
1648 master->num_chipselect = mxc_platform_info->num_chipselect;
Trent Piephoffd4db92017-10-31 12:49:06 -07001649 if (mxc_platform_info->chipselect) {
Kees Cooka86854d2018-06-12 14:07:58 -07001650 master->cs_gpios = devm_kcalloc(&master->dev,
1651 master->num_chipselect, sizeof(int),
1652 GFP_KERNEL);
Trent Piephoffd4db92017-10-31 12:49:06 -07001653 if (!master->cs_gpios)
1654 return -ENOMEM;
Fabio Estevam4cc122a2011-09-15 17:21:15 -03001655
Trent Piephoffd4db92017-10-31 12:49:06 -07001656 for (i = 0; i < master->num_chipselect; i++)
1657 master->cs_gpios[i] = mxc_platform_info->chipselect[i];
1658 }
Trent Piepho881a0b92017-10-31 12:49:04 -07001659 } else {
1660 u32 num_cs;
1661
1662 if (!of_property_read_u32(np, "num-cs", &num_cs))
1663 master->num_chipselect = num_cs;
1664		/* If the property is not present, the default value of 1 is used */
1665 }
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001666
1667 spi_imx->bitbang.chipselect = spi_imx_chipselect;
1668 spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
1669 spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
1670 spi_imx->bitbang.master->setup = spi_imx_setup;
1671 spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
Huang Shijie9e556dc2013-10-23 16:31:50 +08001672 spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
1673 spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
jiada wang71abd292017-09-05 14:12:32 +09001674 spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
Oleksij Rempelab2f3572017-07-25 09:57:09 +02001675 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1676 | SPI_NO_CS;
jiada wang26e4bb82017-06-08 14:16:01 +09001677 if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
1678 is_imx53_ecspi(spi_imx))
Leif Middelschultef72efa72017-04-23 21:19:58 +02001679 spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
1680
1681 spi_imx->spi_drctl = spi_drctl;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001682
1683 init_completion(&spi_imx->xfer_done);
1684
1685 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Fabio Estevam130b82c2013-07-11 01:26:48 -03001686 spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
1687 if (IS_ERR(spi_imx->base)) {
1688 ret = PTR_ERR(spi_imx->base);
1689 goto out_master_put;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001690 }
Anton Bondarenkof12ae172016-02-24 09:20:29 +01001691 spi_imx->base_phys = res->start;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001692
Fabio Estevam4b5d6aa2014-12-29 19:38:51 -02001693 irq = platform_get_irq(pdev, 0);
1694 if (irq < 0) {
1695 ret = irq;
Fabio Estevam130b82c2013-07-11 01:26:48 -03001696 goto out_master_put;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001697 }
1698
Fabio Estevam4b5d6aa2014-12-29 19:38:51 -02001699 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
Alexander Shiyan8fc39b52014-02-22 17:23:46 +04001700 dev_name(&pdev->dev), spi_imx);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001701 if (ret) {
Fabio Estevam4b5d6aa2014-12-29 19:38:51 -02001702 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
Fabio Estevam130b82c2013-07-11 01:26:48 -03001703 goto out_master_put;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001704 }
1705
Sascha Haueraa29d8402012-03-07 09:30:22 +01001706 spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1707 if (IS_ERR(spi_imx->clk_ipg)) {
1708 ret = PTR_ERR(spi_imx->clk_ipg);
Fabio Estevam130b82c2013-07-11 01:26:48 -03001709 goto out_master_put;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001710 }
1711
Sascha Haueraa29d8402012-03-07 09:30:22 +01001712 spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
1713 if (IS_ERR(spi_imx->clk_per)) {
1714 ret = PTR_ERR(spi_imx->clk_per);
Fabio Estevam130b82c2013-07-11 01:26:48 -03001715 goto out_master_put;
Sascha Haueraa29d8402012-03-07 09:30:22 +01001716 }
1717
Fabio Estevam83174622013-07-11 01:26:49 -03001718 ret = clk_prepare_enable(spi_imx->clk_per);
1719 if (ret)
1720 goto out_master_put;
1721
1722 ret = clk_prepare_enable(spi_imx->clk_ipg);
1723 if (ret)
1724 goto out_put_per;
Sascha Haueraa29d8402012-03-07 09:30:22 +01001725
1726 spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
Robin Gongf62cacc2014-09-11 09:18:44 +08001727 /*
Martin Kaiser2dd33f92016-10-20 00:42:25 +02001728	 * DMA is only validated on i.MX35 and i.MX6 so far; the constraint
1729	 * can be removed once it has been validated on other chips.
Robin Gongf62cacc2014-09-11 09:18:44 +08001730 */
jiada wangfd8d4e22017-06-08 14:16:00 +09001731 if (spi_imx->devtype_data->has_dmamode) {
Anton Bondarenkof12ae172016-02-24 09:20:29 +01001732 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
Anton Bondarenkobf9af082015-12-08 07:43:46 +01001733 if (ret == -EPROBE_DEFER)
1734 goto out_clk_put;
1735
Anton Bondarenko37600472015-12-08 07:43:45 +01001736 if (ret < 0)
1737 dev_err(&pdev->dev, "dma setup error %d, use pio\n",
1738 ret);
1739 }
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001740
Shawn Guoedd501bb2011-07-10 01:16:35 +08001741 spi_imx->devtype_data->reset(spi_imx);
Daniel Mackce1807b2009-11-19 19:01:42 +00001742
Shawn Guoedd501bb2011-07-10 01:16:35 +08001743 spi_imx->devtype_data->intctrl(spi_imx, 0);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001744
Shawn Guo22a85e42011-07-10 01:16:41 +08001745 master->dev.of_node = pdev->dev.of_node;
Trent Piepho8197f482017-11-06 10:38:23 -08001746 ret = spi_bitbang_start(&spi_imx->bitbang);
1747 if (ret) {
1748 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
1749 goto out_clk_put;
1750 }
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001751
Trent Piepho881a0b92017-10-31 12:49:04 -07001752 /* Request GPIO CS lines, if any */
1753 if (!spi_imx->slave_mode && master->cs_gpios) {
jiada wang71abd292017-09-05 14:12:32 +09001754 for (i = 0; i < master->num_chipselect; i++) {
1755 if (!gpio_is_valid(master->cs_gpios[i]))
1756 continue;
1757
1758 ret = devm_gpio_request(&pdev->dev,
1759 master->cs_gpios[i],
1760 DRIVER_NAME);
1761 if (ret) {
1762 dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
1763 master->cs_gpios[i]);
Trent Piepho4e21791e2017-10-31 12:49:05 -07001764 goto out_spi_bitbang;
jiada wang71abd292017-09-05 14:12:32 +09001765 }
1766 }
Alexander Shiyanb36581d2016-06-08 20:02:06 +03001767 }
1768
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001769 dev_info(&pdev->dev, "probed\n");
1770
Huang Shijie9e556dc2013-10-23 16:31:50 +08001771 clk_disable(spi_imx->clk_ipg);
1772 clk_disable(spi_imx->clk_per);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001773 return ret;
1774
Trent Piepho4e21791e2017-10-31 12:49:05 -07001775out_spi_bitbang:
1776 spi_bitbang_stop(&spi_imx->bitbang);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001777out_clk_put:
Sascha Haueraa29d8402012-03-07 09:30:22 +01001778 clk_disable_unprepare(spi_imx->clk_ipg);
Fabio Estevam83174622013-07-11 01:26:49 -03001779out_put_per:
1780 clk_disable_unprepare(spi_imx->clk_per);
Fabio Estevam130b82c2013-07-11 01:26:48 -03001781out_master_put:
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001782 spi_master_put(master);
Fabio Estevam130b82c2013-07-11 01:26:48 -03001783
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001784 return ret;
1785}
1786
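/*
 * Device tree properties consulted by the probe path above (see the i.MX
 * SPI binding document for the authoritative list): "spi-slave" selects
 * slave mode on controllers with has_slavemode, "num-cs" overrides the
 * number of chip selects, "fsl,spi-rdy-drctl" sets the DRCTL value (0-2,
 * 3 is reserved), and the SDMA channels are looked up under the names
 * "rx" and "tx". GPIO chip selects come from "cs-gpios", which the SPI
 * core parses into master->cs_gpios.
 */
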
Grant Likelyfd4a3192012-12-07 16:57:14 +00001787static int spi_imx_remove(struct platform_device *pdev)
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001788{
1789 struct spi_master *master = platform_get_drvdata(pdev);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001790 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
Stefan Agnerd5935742018-01-07 15:05:49 +01001791 int ret;
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001792
1793 spi_bitbang_stop(&spi_imx->bitbang);
1794
Stefan Agnerd5935742018-01-07 15:05:49 +01001795 ret = clk_enable(spi_imx->clk_per);
1796 if (ret)
1797 return ret;
1798
1799 ret = clk_enable(spi_imx->clk_ipg);
1800 if (ret) {
1801 clk_disable(spi_imx->clk_per);
1802 return ret;
1803 }
1804
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001805 writel(0, spi_imx->base + MXC_CSPICTRL);
Stefan Agnerd5935742018-01-07 15:05:49 +01001806 clk_disable_unprepare(spi_imx->clk_ipg);
1807 clk_disable_unprepare(spi_imx->clk_per);
Robin Gongf62cacc2014-09-11 09:18:44 +08001808 spi_imx_sdma_exit(spi_imx);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001809 spi_master_put(master);
1810
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001811 return 0;
1812}
1813
1814static struct platform_driver spi_imx_driver = {
1815 .driver = {
1816 .name = DRIVER_NAME,
Shawn Guo22a85e42011-07-10 01:16:41 +08001817 .of_match_table = spi_imx_dt_ids,
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001818 },
Uwe Kleine-Königf4ba6312010-09-09 15:29:01 +02001819 .id_table = spi_imx_devtype,
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001820 .probe = spi_imx_probe,
Grant Likelyfd4a3192012-12-07 16:57:14 +00001821 .remove = spi_imx_remove,
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001822};
Grant Likely940ab882011-10-05 11:29:49 -06001823module_platform_driver(spi_imx_driver);
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001824
wangboaf828002018-04-12 16:58:08 +08001825MODULE_DESCRIPTION("SPI Controller driver");
Uwe Kleine-König6cdeb002009-10-01 15:44:28 -07001826MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1827MODULE_LICENSE("GPL");
Fabio Estevam3133fba32013-01-07 20:42:55 -02001828MODULE_ALIAS("platform:" DRIVER_NAME);