// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define QSPI_PACKED BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL BIT(9)
#define QSPI_TX_EN BIT(11)
#define QSPI_RX_EN BIT(12)
#define QSPI_CS_SW_VAL BIT(20)
#define QSPI_CS_SW_HW BIT(21)
#define QSPI_CONTROL_MODE_0 (0 << 28)
#define QSPI_CONTROL_MODE_3 (3 << 28)
#define QSPI_CONTROL_MODE_MASK (3 << 28)
#define QSPI_M_S BIT(30)
#define QSPI_PIO BIT(31)

#define QSPI_COMMAND2 0x004
#define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0)

#define QSPI_CS_TIMING1 0x008
#define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))

#define QSPI_CS_TIMING2 0x00c
#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5)

#define QSPI_TRANS_STATUS 0x010
#define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff)
#define QSPI_RDY BIT(30)

#define QSPI_FIFO_STATUS 0x014
#define QSPI_RX_FIFO_EMPTY BIT(0)
#define QSPI_RX_FIFO_FULL BIT(1)
#define QSPI_TX_FIFO_EMPTY BIT(2)
#define QSPI_TX_FIFO_FULL BIT(3)
#define QSPI_RX_FIFO_UNF BIT(4)
#define QSPI_RX_FIFO_OVF BIT(5)
#define QSPI_TX_FIFO_UNF BIT(6)
#define QSPI_TX_FIFO_OVF BIT(7)
#define QSPI_ERR BIT(8)
#define QSPI_TX_FIFO_FLUSH BIT(14)
#define QSPI_RX_FIFO_FLUSH BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f)

#define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \
			 QSPI_RX_FIFO_OVF | \
			 QSPI_TX_FIFO_UNF | \
			 QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \
			 QSPI_TX_FIFO_EMPTY)

#define QSPI_TX_DATA 0x018
#define QSPI_RX_DATA 0x01c

#define QSPI_DMA_CTL 0x020
#define QSPI_TX_TRIG(n) (((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3)

#define QSPI_RX_TRIG(n) (((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16 QSPI_RX_TRIG(3)

#define QSPI_DMA_EN BIT(31)

#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)

#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188

#define QSPI_FIFO_DEPTH 64

#define QSPI_INTR_MASK 0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28)
#define QSPI_INTR_RDY_MASK BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \
				  QSPI_INTR_RX_FIFO_OVF_MASK | \
				  QSPI_INTR_TX_FIFO_UNF_MASK | \
				  QSPI_INTR_TX_FIFO_OVF_MASK)

#define QSPI_MISC_REG 0x194
#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)

#define DATA_DIR_TX BIT(0)
#define DATA_DIR_RX BIT(1)

#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)

struct tegra_qspi_client_data {
	int tx_clk_tap_delay;
	int rx_clk_tap_delay;
};

struct tegra_qspi {
	struct device *dev;
	struct spi_master *master;
	/* lock to protect data accessed by irq */
	spinlock_t lock;

	struct clk *clk;
	struct reset_control *rst;
	void __iomem *base;
	phys_addr_t phys;
	unsigned int irq;

	u32 cur_speed;
	unsigned int cur_pos;
	unsigned int words_per_32bit;
	unsigned int bytes_per_word;
	unsigned int curr_dma_words;
	unsigned int cur_direction;

	unsigned int cur_rx_pos;
	unsigned int cur_tx_pos;

	unsigned int dma_buf_size;
	unsigned int max_buf_size;
	bool is_curr_dma_xfer;

	struct completion rx_dma_complete;
	struct completion tx_dma_complete;

	u32 tx_status;
	u32 rx_status;
	u32 status_reg;
	bool is_packed;
	bool use_dma;

	u32 command1_reg;
	u32 dma_control_reg;
	u32 def_command1_reg;
	u32 def_command2_reg;
	u32 spi_cs_timing1;
	u32 spi_cs_timing2;

	struct completion xfer_completion;
	struct spi_transfer *curr_xfer;

	struct dma_chan *rx_dma_chan;
	u32 *rx_dma_buf;
	dma_addr_t rx_dma_phys;
	struct dma_async_tx_descriptor *rx_dma_desc;

	struct dma_chan *tx_dma_chan;
	u32 *tx_dma_buf;
	dma_addr_t tx_dma_phys;
	struct dma_async_tx_descriptor *tx_dma_desc;
};

static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
	return readl(tqspi->base + offset);
}

static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
	writel(value, tqspi->base + offset);

	/* read back register to make sure that register writes completed */
	if (offset != QSPI_TX_FIFO)
		readl(tqspi->base + QSPI_COMMAND1);
}

static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
	u32 value;

	/* write 1 to clear status register */
	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);

	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	if (!(value & QSPI_INTR_RDY_MASK)) {
		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
	}

	/* clear fifo status error if any */
	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if (value & QSPI_ERR)
		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}

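/*
 * Work out how many FIFO words the next chunk of this transfer needs and
 * whether it can use packed mode.
 */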
static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int max_word, max_len, total_fifo_words;
	unsigned int remain_len = t->len - tqspi->cur_pos;
	unsigned int bits_per_word = t->bits_per_word;

	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	/*
	 * The Tegra QSPI controller supports packed or unpacked mode transfers.
	 * Packed mode is used for transfers of 8, 16, or 32 bits per word
	 * with a minimum transfer of 1 FIFO word; unpacked mode is used for
	 * all other transfers.
	 */

	if ((bits_per_word == 8 || bits_per_word == 16 ||
	     bits_per_word == 32) && t->len > 3) {
		tqspi->is_packed = true;
		tqspi->words_per_32bit = 32 / bits_per_word;
	} else {
		tqspi->is_packed = false;
		tqspi->words_per_32bit = 1;
	}

	if (tqspi->is_packed) {
		max_len = min(remain_len, tqspi->max_buf_size);
		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
		max_word = min(max_word, tqspi->max_buf_size / 4);
		tqspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}

	return total_fifo_words;
}

static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int written_words, fifo_words_left, count;
	unsigned int len, tx_empty_count, max_n_32bit, i;
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u32 fifo_status;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tqspi->is_packed) {
		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
		written_words = min(fifo_words_left, tqspi->curr_dma_words);
		len = written_words * tqspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(len, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; (i < 4) && len; i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
	} else {
		unsigned int write_bytes;
		u8 bytes_per_word = tqspi->bytes_per_word;

		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		len = written_words * tqspi->bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		write_bytes = len;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}

static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len, rx_full_count, count, i;
	unsigned int read_words = 0;
	u32 fifo_status, x;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tqspi->is_packed) {
		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);

			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i * 8) & 0xff;
		}

		read_words += tqspi->curr_dma_words;
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tqspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += rx_full_count;
		tqspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}

static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
				tqspi->dma_buf_size, DMA_TO_DEVICE);

	/*
	 * In packed mode, each FIFO word may contain multiple packets
	 * depending on bits per word, so all bytes in each FIFO word are valid.
	 *
	 * In unpacked mode, each FIFO word contains a single packet; any
	 * remaining bits in a FIFO word beyond bits per word are ignored by
	 * the hardware and are invalid bits.
	 */
	if (tqspi->is_packed) {
		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
		unsigned int i, count, consume, write_bytes;

		/*
		 * Fill tx_dma_buf to contain a single packet in each word based
		 * on bits per word from the SPI core tx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = 0;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tqspi->tx_dma_buf[count] = x;
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
				   tqspi->dma_buf_size, DMA_TO_DEVICE);
}

static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
				tqspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tqspi->is_packed) {
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned int i, count, consume, read_bytes;

		/*
		 * Each FIFO word contains a single data packet.
		 * Skip invalid bits in each FIFO word based on bits per word
		 * and align bytes while filling in the SPI core rx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = tqspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		tqspi->cur_rx_pos += read_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
				   tqspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_qspi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t tx_dma_phys;

	reinit_completion(&tqspi->tx_dma_complete);

	if (tqspi->is_packed)
		tx_dma_phys = t->tx_dma;
	else
		tx_dma_phys = tqspi->tx_dma_phys;

	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
							 len, DMA_MEM_TO_DEV,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->tx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
		return -EIO;
	}

	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
	dmaengine_submit(tqspi->tx_dma_desc);
	dma_async_issue_pending(tqspi->tx_dma_chan);

	return 0;
}

static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t rx_dma_phys;

	reinit_completion(&tqspi->rx_dma_complete);

	if (tqspi->is_packed)
		rx_dma_phys = t->rx_dma;
	else
		rx_dma_phys = tqspi->rx_dma_phys;

	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
							 len, DMA_DEV_TO_MEM,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->rx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
		return -EIO;
	}

	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
	dmaengine_submit(tqspi->rx_dma_desc);
	dma_async_issue_pending(tqspi->rx_dma_chan);

	return 0;
}

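/* Flush both FIFOs and poll until the controller reports them empty. */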
static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
	u32 val;

	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
		return 0;

	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);

	if (!atomic)
		return readl_relaxed_poll_timeout(addr, val,
						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						  1000, 1000000);

	return readl_relaxed_poll_timeout_atomic(addr, val,
						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						 1000, 1000000);
}

static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
	u32 intr_mask;

	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}

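/* Map the client buffers for DMA; used only for packed-mode transfers. */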
static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->tx_dma))
			return -ENOMEM;
	}

	if (t->rx_buf) {
		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}

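/*
 * Program the DMA block count and FIFO trigger levels, configure the DMA
 * channels and kick off the DMA-based portion of the transfer.
 */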
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	struct dma_slave_config dma_sconfig = { 0 };
	unsigned int len;
	u8 dma_burst;
	int ret = 0;
	u32 val;

	if (tqspi->is_packed) {
		ret = tegra_qspi_dma_map_xfer(tqspi, t);
		if (ret < 0)
			return ret;
	}

	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(tqspi);

	if (tqspi->is_packed)
		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
	else
		len = tqspi->curr_dma_words * 4;

	/* set attention level based on length of transfer */
	val = 0;
	if (len & 0xf) {
		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
		dma_burst = 1;
	} else if (((len) >> 4) & 0x1) {
		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
		dma_burst = 4;
	} else {
		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
		dma_burst = 8;
	}

	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
	tqspi->dma_control_reg = val;

	dma_sconfig.device_fc = true;
	if (tqspi->cur_direction & DATA_DIR_TX) {
		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.dst_maxburst = dma_burst;
		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
			return ret;
		}

		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
			return ret;
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.src_maxburst = dma_burst;
		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
			return ret;
		}

		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
					   tqspi->dma_buf_size,
					   DMA_FROM_DEVICE);

		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
			if (tqspi->cur_direction & DATA_DIR_TX)
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			return ret;
		}
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);

	tqspi->is_curr_dma_xfer = true;
	tqspi->dma_control_reg = val;
	val |= QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);

	return ret;
}

static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int cur_words;

	if (qspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
	else
		cur_words = qspi->curr_dma_words;

	val = QSPI_DMA_BLK_SET(cur_words - 1);
	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(qspi);

	qspi->is_curr_dma_xfer = false;
	val = qspi->command1_reg;
	val |= QSPI_PIO;
	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);

	return 0;
}

static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
	if (tqspi->tx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
		tqspi->tx_dma_buf = NULL;
	}

	if (tqspi->tx_dma_chan) {
		dma_release_channel(tqspi->tx_dma_chan);
		tqspi->tx_dma_chan = NULL;
	}

	if (tqspi->rx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
		tqspi->rx_dma_buf = NULL;
	}

	if (tqspi->rx_dma_chan) {
		dma_release_channel(tqspi->rx_dma_chan);
		tqspi->rx_dma_chan = NULL;
	}
}

static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
	struct dma_chan *dma_chan;
	dma_addr_t dma_phys;
	u32 *dma_buf;
	int err;

	dma_chan = dma_request_chan(tqspi->dev, "rx");
	if (IS_ERR(dma_chan)) {
		err = PTR_ERR(dma_chan);
		goto err_out;
	}

	tqspi->rx_dma_chan = dma_chan;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->rx_dma_buf = dma_buf;
	tqspi->rx_dma_phys = dma_phys;

	dma_chan = dma_request_chan(tqspi->dev, "tx");
	if (IS_ERR(dma_chan)) {
		err = PTR_ERR(dma_chan);
		goto err_out;
	}

	tqspi->tx_dma_chan = dma_chan;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->tx_dma_buf = dma_buf;
	tqspi->tx_dma_phys = dma_phys;
	tqspi->use_dma = true;

	return 0;

err_out:
	tegra_qspi_deinit_dma(tqspi);

	if (err != -EPROBE_DEFER) {
		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
		dev_err(tqspi->dev, "falling back to PIO\n");
		return 0;
	}

	return err;
}

static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
					 bool is_first_of_msg)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	u32 command1, command2, speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 tx_tap = 0, rx_tap = 0;
	int req_mode;

	if (speed != tqspi->cur_speed) {
		clk_set_rate(tqspi->clk, speed);
		tqspi->cur_speed = speed;
	}

	tqspi->cur_pos = 0;
	tqspi->cur_rx_pos = 0;
	tqspi->cur_tx_pos = 0;
	tqspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_qspi_mask_clear_irq(tqspi);

		command1 = tqspi->def_command1_reg;
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~QSPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_3)
			command1 |= QSPI_CONTROL_MODE_3;
		else
			command1 |= QSPI_CONTROL_MODE_0;

		if (spi->mode & SPI_CS_HIGH)
			command1 |= QSPI_CS_SW_VAL;
		else
			command1 &= ~QSPI_CS_SW_VAL;
		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

		if (cdata && cdata->tx_clk_tap_delay)
			tx_tap = cdata->tx_clk_tap_delay;

		if (cdata && cdata->rx_clk_tap_delay)
			rx_tap = cdata->rx_clk_tap_delay;

		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
		if (command2 != tqspi->def_command2_reg)
			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);

	} else {
		command1 = tqspi->command1_reg;
		command1 &= ~QSPI_BIT_LENGTH(~0);
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
	}

	command1 &= ~QSPI_SDR_DDR_SEL;

	return command1;
}

static int tegra_qspi_start_transfer_one(struct spi_device *spi,
					 struct spi_transfer *t, u32 command1)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	unsigned int total_fifo_words;
	u8 bus_width = 0;
	int ret;

	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);

	command1 &= ~QSPI_PACKED;
	if (tqspi->is_packed)
		command1 |= QSPI_PACKED;
	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

	tqspi->cur_direction = 0;

	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
	if (t->rx_buf) {
		command1 |= QSPI_RX_EN;
		tqspi->cur_direction |= DATA_DIR_RX;
		bus_width = t->rx_nbits;
	}

	if (t->tx_buf) {
		command1 |= QSPI_TX_EN;
		tqspi->cur_direction |= DATA_DIR_TX;
		bus_width = t->tx_nbits;
	}

	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;

	if (bus_width == SPI_NBITS_QUAD)
		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
	else if (bus_width == SPI_NBITS_DUAL)
		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
	else
		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;

	tqspi->command1_reg = command1;

	ret = tegra_qspi_flush_fifos(tqspi, false);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);

	return ret;
}

static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
	struct tegra_qspi_client_data *cdata;
	struct device_node *slave_np = spi->dev.of_node;

	cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
	if (!cdata)
		return NULL;

	of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
			     &cdata->tx_clk_tap_delay);
	of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
			     &cdata->rx_clk_tap_delay);
	return cdata;
}

static void tegra_qspi_cleanup(struct spi_device *spi)
{
	struct tegra_qspi_client_data *cdata = spi->controller_data;

	spi->controller_data = NULL;
	kfree(cdata);
}

static int tegra_qspi_setup(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	unsigned long flags;
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(tqspi->dev);
	if (ret < 0) {
		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	if (!cdata) {
		cdata = tegra_qspi_parse_cdata_dt(spi);
		spi->controller_data = cdata;
	}

	spin_lock_irqsave(&tqspi->lock, flags);

	/* keep the default chip-select state inactive */
	val = tqspi->def_command1_reg;
	if (spi->mode & SPI_CS_HIGH)
		val &= ~QSPI_CS_SW_VAL;
	else
		val |= QSPI_CS_SW_VAL;

	tqspi->def_command1_reg = val;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);

	spin_unlock_irqrestore(&tqspi->lock, flags);

	pm_runtime_put(tqspi->dev);

	return 0;
}

static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
	dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
	dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
	dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
	dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}

static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
	tegra_qspi_dump_regs(tqspi);
	tegra_qspi_flush_fifos(tqspi, true);
	reset_control_assert(tqspi->rst);
	udelay(2);
	reset_control_deassert(tqspi->rst);
}

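/* Return the chip-select line to its inactive level. */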
static void tegra_qspi_transfer_end(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

	if (cs_val)
		tqspi->command1_reg |= QSPI_CS_SW_VAL;
	else
		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}

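/*
 * Run all transfers in a message back to back, handling CS changes and
 * per-transfer delays, then finalize the message for the SPI core.
 */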
static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi_message *msg)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	bool is_first_msg = true;
	int ret;

	msg->status = 0;
	msg->actual_length = 0;
	tqspi->tx_status = 0;
	tqspi->rx_status = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		u32 cmd1;

		reinit_completion(&tqspi->xfer_completion);

		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);

		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
			goto complete_xfer;
		}

		is_first_msg = false;
		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
						  QSPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			dev_err(tqspi->dev, "transfer timeout: %d\n", ret);
			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
				dmaengine_terminate_all(tqspi->rx_dma_chan);
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		if (tqspi->tx_status || tqspi->rx_status) {
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		msg->actual_length += xfer->len;

complete_xfer:
		if (ret < 0) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
			goto exit;
		}

		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
			/* de-activate CS after the last transfer only when cs_change is not set */
			if (!xfer->cs_change) {
				tegra_qspi_transfer_end(spi);
				spi_transfer_delay_exec(xfer);
			}
		} else if (xfer->cs_change) {
			/* de-activate CS between transfers only when cs_change is set */
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
	}

	ret = 0;
exit:
	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);

	if (tqspi->tx_status || tqspi->rx_status) {
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

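/*
 * Finish a DMA transfer chunk: wait for the DMA completions, copy out any
 * unpacked RX data and either complete the transfer or start the next chunk.
 */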
static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned int total_fifo_words;
	unsigned long flags;
	long wait_status;
	int err = 0;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_status) {
			dmaengine_terminate_all(tqspi->tx_dma_chan);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->tx_dma_chan);
				dev_err(tqspi->dev, "failed TX DMA transfer\n");
				err += 1;
			}
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_status) {
			dmaengine_terminate_all(tqspi->rx_dma_chan);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->rx_dma_chan);
				dev_err(tqspi->dev, "failed RX DMA transfer\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tqspi->lock, flags);

	if (err) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_dma_unmap_xfer(tqspi, t);

	/* continue transfer in current message */
	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	if (total_fifo_words > QSPI_FIFO_DEPTH)
		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);

exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

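/*
 * Threaded interrupt handler: latch the FIFO status and dispatch to the
 * PIO or DMA completion path.
 */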
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
	struct tegra_qspi *tqspi = context_data;

	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);

	if (tqspi->cur_direction & DATA_DIR_RX)
		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);

	tegra_qspi_mask_clear_irq(tqspi);

	if (!tqspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tqspi);

	return handle_dma_based_xfer(tqspi);
}

static const struct of_device_id tegra_qspi_of_match[] = {
	{ .compatible = "nvidia,tegra210-qspi", },
	{ .compatible = "nvidia,tegra186-qspi", },
	{ .compatible = "nvidia,tegra194-qspi", },
	{}
};

MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);

static int tegra_qspi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct tegra_qspi *tqspi;
	struct resource *r;
	int ret, qspi_irq;
	int bus_num;

	master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);
	tqspi = spi_master_get_devdata(master);

	master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
			    SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	master->setup = tegra_qspi_setup;
	master->cleanup = tegra_qspi_cleanup;
	master->transfer_one_message = tegra_qspi_transfer_one_message;
	master->num_chipselect = 1;
	master->auto_runtime_pm = true;

	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
	if (bus_num >= 0)
		master->bus_num = bus_num;

	tqspi->master = master;
	tqspi->dev = &pdev->dev;
	spin_lock_init(&tqspi->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tqspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(tqspi->base))
		return PTR_ERR(tqspi->base);

	tqspi->phys = r->start;
	qspi_irq = platform_get_irq(pdev, 0);
	tqspi->irq = qspi_irq;

	tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
	if (IS_ERR(tqspi->clk)) {
		ret = PTR_ERR(tqspi->clk);
		dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
		return ret;
	}

	tqspi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(tqspi->rst)) {
		ret = PTR_ERR(tqspi->rst);
		dev_err(&pdev->dev, "failed to get reset control: %d\n", ret);
		return ret;
	}

	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;

	ret = tegra_qspi_init_dma(tqspi);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma)
		tqspi->max_buf_size = tqspi->dma_buf_size;

	init_completion(&tqspi->tx_dma_complete);
	init_completion(&tqspi->rx_dma_complete);
	init_completion(&tqspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
		goto exit_pm_disable;
	}

	reset_control_assert(tqspi->rst);
	udelay(2);
	reset_control_deassert(tqspi->rst);

	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);

	pm_runtime_put(&pdev->dev);

	ret = request_threaded_irq(tqspi->irq, NULL,
				   tegra_qspi_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tqspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
		goto exit_pm_disable;
	}

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register master: %d\n", ret);
		goto exit_free_irq;
	}

	return 0;

exit_free_irq:
	free_irq(qspi_irq, tqspi);
exit_pm_disable:
	pm_runtime_disable(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
	return ret;
}

static int tegra_qspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);

	spi_unregister_master(master);
	free_irq(tqspi->irq, tqspi);
	pm_runtime_disable(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);

	return 0;
}

static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}

static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);

	/* flush all writes that are still in the PPSB queue by reading back */
	tegra_qspi_readl(tqspi, QSPI_COMMAND1);

	clk_disable_unprepare(tqspi->clk);

	return 0;
}

static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(tqspi->clk);
	if (ret < 0)
		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops tegra_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
};

static struct platform_driver tegra_qspi_driver = {
	.driver = {
		.name = "tegra-qspi",
		.pm = &tegra_qspi_pm_ops,
		.of_match_table = tegra_qspi_of_match,
	},
	.probe = tegra_qspi_probe,
	.remove = tegra_qspi_remove,
};
module_platform_driver(tegra_qspi_driver);

MODULE_ALIAS("platform:qspi-tegra");
MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
MODULE_LICENSE("GPL v2");