// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>

/* eSPI Controller registers */
#define ESPI_SPMODE	0x00	/* eSPI mode register */
#define ESPI_SPIE	0x04	/* eSPI event register */
#define ESPI_SPIM	0x08	/* eSPI mask register */
#define ESPI_SPCOM	0x0c	/* eSPI command register */
#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register */
#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register */
#define ESPI_SPMODE0	0x20	/* eSPI cs0 mode register */

#define ESPI_SPMODEx(x)	(ESPI_SPMODE0 + (x) * 4)

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		BIT(31)
#define SPMODE_LOOP		BIT(30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK	BIT(30)
#define CSMODE_REV		BIT(29)
#define CSMODE_DIV16		BIT(28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		BIT(20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

#define FSL_ESPI_FIFO_SIZE	32
#define FSL_ESPI_RXTHR		15

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define SPIE_RXCNT(reg)	((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)	((reg >> 16) & 0x3F)
#define SPIE_TXE	BIT(15)	/* TX FIFO empty */
#define SPIE_DON	BIT(14)	/* TX done */
#define SPIE_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIE_RXF	BIT(12)	/* RX FIFO full */
#define SPIE_TXT	BIT(11)	/* TX FIFO threshold */
#define SPIE_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIE_TNF	BIT(8)	/* TX FIFO not full */

/* SPIM register values */
#define SPIM_TXE	BIT(15)	/* TX FIFO empty */
#define SPIM_DON	BIT(14)	/* TX done */
#define SPIM_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIM_RXF	BIT(12)	/* RX FIFO full */
#define SPIM_TXT	BIT(11)	/* TX FIFO threshold */
#define SPIM_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIM_TNF	BIT(8)	/* TX FIFO not full */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_DO		BIT(28)	/* Dual output */
#define SPCOM_TO		BIT(27)	/* TX only */
#define SPCOM_RXSKIP(x)		((x) << 16)
#define SPCOM_TRANLEN(x)	((x) << 0)

#define SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */

#define AUTOSUSPEND_TIMEOUT	2000

struct fsl_espi {
	struct device *dev;
	void __iomem *reg_base;

	struct list_head *m_transfers;
	struct spi_transfer *tx_t;
	unsigned int tx_pos;
	bool tx_done;
	struct spi_transfer *rx_t;
	unsigned int rx_pos;
	bool rx_done;

	bool swab;
	unsigned int rxskip;

	spinlock_t lock;

	u32 spibrg;	/* SPIBRG input clock */

	struct completion done;
};

struct fsl_espi_cs {
	u32 hw_mode;
};

static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
	return ioread32be(espi->reg_base + offset);
}

static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
	return ioread16be(espi->reg_base + offset);
}

static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
	return ioread8(espi->reg_base + offset);
}

static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
				      u32 val)
{
	iowrite32be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
					u16 val)
{
	iowrite16be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
				       u8 val)
{
	iowrite8(val, espi->reg_base + offset);
}

static int fsl_espi_check_message(struct spi_message *m)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_transfer *t, *first;

	if (m->frame_length > SPCOM_TRANLEN_MAX) {
		dev_err(espi->dev, "message too long, size is %u bytes\n",
			m->frame_length);
		return -EMSGSIZE;
	}

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (first->bits_per_word != t->bits_per_word ||
		    first->speed_hz != t->speed_hz) {
			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
			return -EINVAL;
		}
	}

	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
	    first->bits_per_word != 16) {
		dev_err(espi->dev,
			"MSB-first transfer not supported for wordsize %u\n",
			first->bits_per_word);
		return -EINVAL;
	}

	return 0;
}

static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int i = 0, rxskip = 0;

	/*
	 * prerequisites for ESPI rxskip mode:
	 * - message has two transfers
	 * - first transfer is a write and second is a read
	 *
	 * In addition the current low-level transfer mechanism requires
	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
	 * the TX FIFO isn't re-filled.
	 */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (i == 0) {
			if (!t->tx_buf || t->rx_buf ||
			    t->len > FSL_ESPI_FIFO_SIZE)
				return 0;
			rxskip = t->len;
		} else if (i == 1) {
			if (t->tx_buf || !t->rx_buf)
				return 0;
		}
		i++;
	}

	return i == 2 ? rxskip : 0;
}
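
/*
 * Fill the TX FIFO from the current transfer, using 32-bit accesses where
 * possible (with optional byte swapping for LSB-first modes) and falling
 * back to 16-bit/8-bit accesses for the remainder. When the current
 * transfer is exhausted, advance to the next one in the message as long
 * as FIFO space is left.
 */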
static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 tx_fifo_avail;
	unsigned int tx_left;
	const void *tx_buf;

	/* if events is zero transfer has not started and tx fifo is empty */
	tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
	tx_left = espi->tx_t->len - espi->tx_pos;
	tx_buf = espi->tx_t->tx_buf;
	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
		if (tx_left >= 4) {
			if (!tx_buf)
				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
			else if (espi->swab)
				fsl_espi_write_reg(espi, ESPI_SPITF,
					swahb32p(tx_buf + espi->tx_pos));
			else
				fsl_espi_write_reg(espi, ESPI_SPITF,
					*(u32 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 4;
			tx_left -= 4;
			tx_fifo_avail -= 4;
		} else if (tx_left >= 2 && tx_buf && espi->swab) {
			fsl_espi_write_reg16(espi, ESPI_SPITF,
					     swab16p(tx_buf + espi->tx_pos));
			espi->tx_pos += 2;
			tx_left -= 2;
			tx_fifo_avail -= 2;
		} else {
			if (!tx_buf)
				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
			else
				fsl_espi_write_reg8(espi, ESPI_SPITF,
					*(u8 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 1;
			tx_left -= 1;
			tx_fifo_avail -= 1;
		}
	}

	if (!tx_left) {
		/* Last transfer finished, in rxskip mode only one is needed */
		if (list_is_last(&espi->tx_t->transfer_list,
				 espi->m_transfers) || espi->rxskip) {
			espi->tx_done = true;
			return;
		}
		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
		espi->tx_pos = 0;
		/* continue with next transfer if tx fifo is not full */
		if (tx_fifo_avail)
			goto start;
	}
}
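
/*
 * Drain the RX FIFO into the current transfer's rx buffer, mirroring the
 * access-size and byte-swapping logic of the TX path, and advance to the
 * next transfer of the message once the current one is complete.
 */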
static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 rx_fifo_avail = SPIE_RXCNT(events);
	unsigned int rx_left;
	void *rx_buf;

start:
	rx_left = espi->rx_t->len - espi->rx_pos;
	rx_buf = espi->rx_t->rx_buf;
	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
		if (rx_left >= 4) {
			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);

			if (rx_buf && espi->swab)
				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
			else if (rx_buf)
				*(u32 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 4;
			rx_left -= 4;
			rx_fifo_avail -= 4;
		} else if (rx_left >= 2 && rx_buf && espi->swab) {
			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);

			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
			espi->rx_pos += 2;
			rx_left -= 2;
			rx_fifo_avail -= 2;
		} else {
			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);

			if (rx_buf)
				*(u8 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 1;
			rx_left -= 1;
			rx_fifo_avail -= 1;
		}
	}

	if (!rx_left) {
		if (list_is_last(&espi->rx_t->transfer_list,
				 espi->m_transfers)) {
			espi->rx_done = true;
			return;
		}
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
		espi->rx_pos = 0;
		/* continue with next transfer if rx fifo is not empty */
		if (rx_fifo_avail)
			goto start;
	}
}

static void fsl_espi_setup_transfer(struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
	u32 hw_mode_old = cs->hw_mode;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);

	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;

	if (pm > 15) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
	}

	cs->hw_mode |= CSMODE_PM(pm);

	/* don't write the mode register if the mode doesn't change */
	if (cs->hw_mode != hw_mode_old)
		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
				   cs->hw_mode);
}

static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	unsigned int rx_len = t->len;
	u32 mask, spcom;
	int ret;

	reinit_completion(&espi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	spcom = SPCOM_CS(spi->chip_select);
	spcom |= SPCOM_TRANLEN(t->len - 1);

	/* configure RXSKIP mode */
	if (espi->rxskip) {
		spcom |= SPCOM_RXSKIP(espi->rxskip);
		rx_len = t->len - espi->rxskip;
		if (t->rx_nbits == SPI_NBITS_DUAL)
			spcom |= SPCOM_DO;
	}

	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);

	/* enable interrupts */
	mask = SPIM_DON;
	if (rx_len > FSL_ESPI_FIFO_SIZE)
		mask |= SPIM_RXT;
	fsl_espi_write_reg(espi, ESPI_SPIM, mask);

	/* Prevent filling the fifo from getting interrupted */
	spin_lock_irq(&espi->lock);
	fsl_espi_fill_tx_fifo(espi, 0);
	spin_unlock_irq(&espi->lock);

	/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
	if (ret == 0)
		dev_err(espi->dev, "Transfer timed out!\n");

	/* disable rx ints */
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);

	return ret == 0 ? -ETIMEDOUT : 0;
}

static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_device *spi = m->spi;
	int ret;

	/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;

	espi->m_transfers = &m->transfers;
	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->tx_pos = 0;
	espi->tx_done = false;
	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->rx_pos = 0;
	espi->rx_done = false;

	espi->rxskip = fsl_espi_check_rxskip_mode(m);
	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
		return -EINVAL;
	}

	/* In RXSKIP mode skip first transfer for reads */
	if (espi->rxskip)
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);

	fsl_espi_setup_transfer(spi, trans);

	ret = fsl_espi_bufs(spi, trans);

	spi_transfer_delay_exec(trans);

	return ret;
}
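
/*
 * The controller executes a whole SPI message as a single hardware
 * transaction: all transfers are combined into one synthetic spi_transfer
 * covering frame_length bytes, using the speed and word size of the first
 * transfer and the longest per-transfer delay.
 */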
static int fsl_espi_do_one_msg(struct spi_master *master,
			       struct spi_message *m)
{
	unsigned int delay_usecs = 0, rx_nbits = 0;
	unsigned int delay_nsecs = 0, delay_nsecs1 = 0;
	struct spi_transfer *t, trans = {};
	int ret;

	ret = fsl_espi_check_message(m);
	if (ret)
		goto out;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->delay_usecs) {
			if (t->delay_usecs > delay_usecs) {
				delay_usecs = t->delay_usecs;
				delay_nsecs = delay_usecs * 1000;
			}
		} else {
			delay_nsecs1 = spi_delay_to_ns(&t->delay, t);
			if (delay_nsecs1 > delay_nsecs)
				delay_nsecs = delay_nsecs1;
		}
		if (t->rx_nbits > rx_nbits)
			rx_nbits = t->rx_nbits;
	}

	t = list_first_entry(&m->transfers, struct spi_transfer,
			     transfer_list);

	trans.len = m->frame_length;
	trans.speed_hz = t->speed_hz;
	trans.bits_per_word = t->bits_per_word;
	trans.delay.value = delay_nsecs;
	trans.delay.unit = SPI_DELAY_UNIT_NSECS;
	trans.rx_nbits = rx_nbits;

	if (trans.len)
		ret = fsl_espi_trans(m, &trans);

	m->actual_length = ret ? 0 : trans.len;
out:
	if (m->status == -EINPROGRESS)
		m->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

static int fsl_espi_setup(struct spi_device *spi)
{
	struct fsl_espi *espi;
	u32 loop_mode;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}

	espi = spi_master_get_devdata(spi->master);

	pm_runtime_get_sync(espi->dev);

	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);

	fsl_espi_setup_transfer(spi, NULL);

	pm_runtime_mark_last_busy(espi->dev);
	pm_runtime_put_autosuspend(espi->dev);

	return 0;
}

static void fsl_espi_cleanup(struct spi_device *spi)
{
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	kfree(cs);
	spi_set_ctldata(spi, NULL);
}

static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
	if (!espi->rx_done)
		fsl_espi_read_rx_fifo(espi, events);

	if (!espi->tx_done)
		fsl_espi_fill_tx_fifo(espi, events);

	if (!espi->tx_done || !espi->rx_done)
		return;

	/* we're done, but check for errors before returning */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);

	if (!(events & SPIE_DON))
		dev_err(espi->dev,
			"Transfer done but SPIE_DON isn't set!\n");

	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
		dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
		dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
			SPIE_RXCNT(events), SPIE_TXCNT(events));
	}

	complete(&espi->done);
}

static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct fsl_espi *espi = context_data;
	u32 events;

	spin_lock(&espi->lock);

	/* Get interrupt events(tx/rx) */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);
	if (!events) {
		spin_unlock(&espi->lock);
		return IRQ_NONE;
	}

	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(espi, events);

	/* Clear the events */
	fsl_espi_write_reg(espi, ESPI_SPIE, events);

	spin_unlock(&espi->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval &= ~SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static int fsl_espi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval |= SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}
#endif

static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
	return SPCOM_TRANLEN_MAX;
}

static void fsl_espi_init_regs(struct device *dev, bool initial)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	struct device_node *nc;
	u32 csmode, cs, prop;
	int ret;

	/* SPI controller initializations */
	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);

	/* Init eSPI CS mode register */
	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* get chip select */
		ret = of_property_read_u32(nc, "reg", &cs);
		if (ret || cs >= master->num_chipselect)
			continue;

		csmode = CSMODE_INIT_VAL;

		/* check if CSBEF is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_BEF(0xf));
			csmode |= CSMODE_BEF(prop);
		}

		/* check if CSAFT is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_AFT(0xf));
			csmode |= CSMODE_AFT(prop);
		}

		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);

		if (initial)
			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
	}

	/* Enable SPI interface */
	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}

static int fsl_espi_probe(struct device *dev, struct resource *mem,
			  unsigned int irq, unsigned int num_cs)
{
	struct spi_master *master;
	struct fsl_espi *espi;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct fsl_espi));
	if (!master)
		return -ENOMEM;

	dev_set_drvdata(dev, master);

	master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
			    SPI_LSB_FIRST | SPI_LOOP;
	master->dev.of_node = dev->of_node;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->setup = fsl_espi_setup;
	master->cleanup = fsl_espi_cleanup;
	master->transfer_one_message = fsl_espi_do_one_msg;
	master->auto_runtime_pm = true;
	master->max_message_size = fsl_espi_max_message_size;
	master->num_chipselect = num_cs;

	espi = spi_master_get_devdata(master);
	spin_lock_init(&espi->lock);

	espi->dev = dev;
	espi->spibrg = fsl_get_sys_freq();
	if (espi->spibrg == -1) {
		dev_err(dev, "Can't get sys frequency!\n");
		ret = -EINVAL;
		goto err_probe;
	}
	/* determined by clock divider fields DIV16/PM in register SPMODEx */
	master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
	master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);

	init_completion(&espi->done);

	espi->reg_base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(espi->reg_base)) {
		ret = PTR_ERR(espi->reg_base);
		goto err_probe;
	}

	/* Register for SPI Interrupt */
	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
	if (ret)
		goto err_probe;

	fsl_espi_init_regs(dev, true);

	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret < 0)
		goto err_pm;

	dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
err_probe:
	spi_master_put(master);
	return ret;
}

static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 num_cs;
	int ret;

	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
	if (ret) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return 0;
	}

	return num_cs;
}

static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource mem;
	unsigned int irq, num_cs;
	int ret;

	if (of_property_read_bool(np, "mode")) {
		dev_err(dev, "mode property is not supported on ESPI!\n");
		return -EINVAL;
	}

	num_cs = of_fsl_espi_get_chipselects(dev);
	if (!num_cs)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	return fsl_espi_probe(dev, &mem, irq, num_cs);
}

static int of_fsl_espi_remove(struct platform_device *dev)
{
	pm_runtime_disable(&dev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int of_fsl_espi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	fsl_espi_init_regs(dev, false);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops espi_pm = {
	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
			   fsl_espi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.of_match_table = of_fsl_espi_match,
		.pm = &espi_pm,
	},
	.probe = of_fsl_espi_probe,
	.remove = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");