// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define QSPI_PACKED BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL BIT(9)
#define QSPI_TX_EN BIT(11)
#define QSPI_RX_EN BIT(12)
#define QSPI_CS_SW_VAL BIT(20)
#define QSPI_CS_SW_HW BIT(21)
#define QSPI_CONTROL_MODE_0 (0 << 28)
#define QSPI_CONTROL_MODE_3 (3 << 28)
#define QSPI_CONTROL_MODE_MASK (3 << 28)
#define QSPI_M_S BIT(30)
#define QSPI_PIO BIT(31)

#define QSPI_COMMAND2 0x004
#define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0)

#define QSPI_CS_TIMING1 0x008
#define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))

#define QSPI_CS_TIMING2 0x00c
#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5)

#define QSPI_TRANS_STATUS 0x010
#define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff)
#define QSPI_RDY BIT(30)

#define QSPI_FIFO_STATUS 0x014
#define QSPI_RX_FIFO_EMPTY BIT(0)
#define QSPI_RX_FIFO_FULL BIT(1)
#define QSPI_TX_FIFO_EMPTY BIT(2)
#define QSPI_TX_FIFO_FULL BIT(3)
#define QSPI_RX_FIFO_UNF BIT(4)
#define QSPI_RX_FIFO_OVF BIT(5)
#define QSPI_TX_FIFO_UNF BIT(6)
#define QSPI_TX_FIFO_OVF BIT(7)
#define QSPI_ERR BIT(8)
#define QSPI_TX_FIFO_FLUSH BIT(14)
#define QSPI_RX_FIFO_FLUSH BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f)

#define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \
			 QSPI_RX_FIFO_OVF | \
			 QSPI_TX_FIFO_UNF | \
			 QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \
			 QSPI_TX_FIFO_EMPTY)

#define QSPI_TX_DATA 0x018
#define QSPI_RX_DATA 0x01c

#define QSPI_DMA_CTL 0x020
#define QSPI_TX_TRIG(n) (((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3)

#define QSPI_RX_TRIG(n) (((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16 QSPI_RX_TRIG(3)

#define QSPI_DMA_EN BIT(31)

#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)

#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188

#define QSPI_FIFO_DEPTH 64

#define QSPI_INTR_MASK 0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28)
#define QSPI_INTR_RDY_MASK BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \
				  QSPI_INTR_RX_FIFO_OVF_MASK | \
				  QSPI_INTR_TX_FIFO_UNF_MASK | \
				  QSPI_INTR_TX_FIFO_OVF_MASK)

#define QSPI_MISC_REG 0x194
#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX 0xff

#define DATA_DIR_TX BIT(0)
#define DATA_DIR_RX BIT(1)

#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)

struct tegra_qspi_client_data {
	int tx_clk_tap_delay;
	int rx_clk_tap_delay;
};

struct tegra_qspi {
	struct device *dev;
	struct spi_master *master;
	/* lock to protect data accessed by irq */
	spinlock_t lock;

	struct clk *clk;
	struct reset_control *rst;
	void __iomem *base;
	phys_addr_t phys;
	unsigned int irq;

	u32 cur_speed;
	unsigned int cur_pos;
	unsigned int words_per_32bit;
	unsigned int bytes_per_word;
	unsigned int curr_dma_words;
	unsigned int cur_direction;

	unsigned int cur_rx_pos;
	unsigned int cur_tx_pos;

	unsigned int dma_buf_size;
	unsigned int max_buf_size;
	bool is_curr_dma_xfer;

	struct completion rx_dma_complete;
	struct completion tx_dma_complete;

	u32 tx_status;
	u32 rx_status;
	u32 status_reg;
	bool is_packed;
	bool use_dma;

	u32 command1_reg;
	u32 dma_control_reg;
	u32 def_command1_reg;
	u32 def_command2_reg;
	u32 spi_cs_timing1;
	u32 spi_cs_timing2;
	u8 dummy_cycles;

	struct completion xfer_completion;
	struct spi_transfer *curr_xfer;

	struct dma_chan *rx_dma_chan;
	u32 *rx_dma_buf;
	dma_addr_t rx_dma_phys;
	struct dma_async_tx_descriptor *rx_dma_desc;

	struct dma_chan *tx_dma_chan;
	u32 *tx_dma_buf;
	dma_addr_t tx_dma_phys;
	struct dma_async_tx_descriptor *tx_dma_desc;
};

static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
	return readl(tqspi->base + offset);
}

static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
	writel(value, tqspi->base + offset);

	/* read back register to make sure that register writes completed */
	if (offset != QSPI_TX_FIFO)
		readl(tqspi->base + QSPI_COMMAND1);
}

static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
	u32 value;

	/* write 1 to clear status register */
	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);

	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	if (!(value & QSPI_INTR_RDY_MASK)) {
		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
	}

	/* clear fifo status error if any */
	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if (value & QSPI_ERR)
		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}

static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int max_word, max_len, total_fifo_words;
	unsigned int remain_len = t->len - tqspi->cur_pos;
	unsigned int bits_per_word = t->bits_per_word;

	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	/*
	 * The Tegra QSPI controller supports packed and unpacked mode
	 * transfers. Packed mode is used for transfers of 8, 16 or 32 bits
	 * per word with a length of at least four bytes; all other
	 * transfers use unpacked mode.
	 */
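	/*
	 * Worked example (illustrative, not from the TRM): a 100-byte
	 * transfer at 8 bits per word is packed, giving curr_dma_words = 100
	 * packets and total_fifo_words = 25 32-bit FIFO words. The same
	 * 100 bytes at 24 bits per word fall back to unpacked mode, where
	 * each 3-byte packet occupies a full FIFO word (34 words in total).
	 */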
	if ((bits_per_word == 8 || bits_per_word == 16 ||
	     bits_per_word == 32) && t->len > 3) {
		tqspi->is_packed = true;
		tqspi->words_per_32bit = 32 / bits_per_word;
	} else {
		tqspi->is_packed = false;
		tqspi->words_per_32bit = 1;
	}

	if (tqspi->is_packed) {
		max_len = min(remain_len, tqspi->max_buf_size);
		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
		max_word = min(max_word, tqspi->max_buf_size / 4);
		tqspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}

	return total_fifo_words;
}

static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int written_words, fifo_words_left, count;
	unsigned int len, tx_empty_count, max_n_32bit, i;
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u32 fifo_status;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tqspi->is_packed) {
		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
		written_words = min(fifo_words_left, tqspi->curr_dma_words);
		len = written_words * tqspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(len, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; (i < 4) && len; i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
	} else {
		unsigned int write_bytes;
		u8 bytes_per_word = tqspi->bytes_per_word;

		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		len = written_words * tqspi->bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		write_bytes = len;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}

static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len, rx_full_count, count, i;
	unsigned int read_words = 0;
	u32 fifo_status, x;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tqspi->is_packed) {
		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);

			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i * 8) & 0xff;
		}

		read_words += tqspi->curr_dma_words;
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tqspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += rx_full_count;
		tqspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}

static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
				tqspi->dma_buf_size, DMA_TO_DEVICE);

	/*
	 * In packed mode, each FIFO word may contain multiple packets,
	 * depending on bits per word, so every byte in a FIFO word is
	 * valid.
	 *
	 * In unpacked mode, each FIFO word contains a single packet and,
	 * depending on bits per word, any remaining bits in the FIFO word
	 * are invalid and ignored by the hardware.
	 */
	if (tqspi->is_packed) {
		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
		unsigned int i, count, consume, write_bytes;

		/*
		 * Fill tx_dma_buf with a single packet per word, based on
		 * bits per word, from the SPI core tx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = 0;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tqspi->tx_dma_buf[count] = x;
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
				   tqspi->dma_buf_size, DMA_TO_DEVICE);
}

static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
				tqspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tqspi->is_packed) {
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned int i, count, consume, read_bytes;

		/*
		 * Each FIFO word contains a single data packet.
		 * Skip the invalid bits in each FIFO word based on bits
		 * per word and pack the valid bytes into the SPI core
		 * rx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = tqspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		tqspi->cur_rx_pos += read_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
				   tqspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_qspi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t tx_dma_phys;

	reinit_completion(&tqspi->tx_dma_complete);

	if (tqspi->is_packed)
		tx_dma_phys = t->tx_dma;
	else
		tx_dma_phys = tqspi->tx_dma_phys;

	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
							 len, DMA_MEM_TO_DEV,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->tx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
		return -EIO;
	}

	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
	dmaengine_submit(tqspi->tx_dma_desc);
	dma_async_issue_pending(tqspi->tx_dma_chan);

	return 0;
}

static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t rx_dma_phys;

	reinit_completion(&tqspi->rx_dma_complete);

	if (tqspi->is_packed)
		rx_dma_phys = t->rx_dma;
	else
		rx_dma_phys = tqspi->rx_dma_phys;

	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
							 len, DMA_DEV_TO_MEM,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->rx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
		return -EIO;
	}

	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
	dmaengine_submit(tqspi->rx_dma_desc);
	dma_async_issue_pending(tqspi->rx_dma_chan);

	return 0;
}

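/*
 * Flush both FIFOs and poll until they report empty. The atomic variant
 * is used from error-handling paths where sleeping is not allowed.
 */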
static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
	u32 val;

	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
		return 0;

	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);

	if (!atomic)
		return readl_relaxed_poll_timeout(addr, val,
						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						  1000, 1000000);

	return readl_relaxed_poll_timeout_atomic(addr, val,
						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						 1000, 1000000);
}

static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
	u32 intr_mask;

	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}

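/*
 * Map the client buffers for DMA. This is only done for packed-mode
 * transfers, where data is DMA'd directly from/to the caller's buffers;
 * the length is rounded up to a multiple of 4 bytes to match the 32-bit
 * FIFO width.
 */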
static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->tx_dma))
			return -ENOMEM;
	}

	if (t->rx_buf) {
		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}

static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	struct dma_slave_config dma_sconfig = { 0 };
	unsigned int len;
	u8 dma_burst;
	int ret = 0;
	u32 val;

	if (tqspi->is_packed) {
		ret = tegra_qspi_dma_map_xfer(tqspi, t);
		if (ret < 0)
			return ret;
	}

	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(tqspi);

	if (tqspi->is_packed)
		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
	else
		len = tqspi->curr_dma_words * 4;

	/* set FIFO trigger levels and DMA burst size based on the transfer length */
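	/*
	 * Illustrative mapping: a len that is not a multiple of 16 bytes
	 * (e.g. 7) selects a trigger level and burst of one word; len = 16
	 * (or any odd multiple of 16, e.g. 48) selects four words; any
	 * multiple of 32 selects eight words.
	 */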
	val = 0;
	if (len & 0xf) {
		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
		dma_burst = 1;
	} else if (((len) >> 4) & 0x1) {
		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
		dma_burst = 4;
	} else {
		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
		dma_burst = 8;
	}

	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
	tqspi->dma_control_reg = val;

	dma_sconfig.device_fc = true;
	if (tqspi->cur_direction & DATA_DIR_TX) {
		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.dst_maxburst = dma_burst;
		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
			return ret;
		}

		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
			return ret;
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.src_maxburst = dma_burst;
		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
			return ret;
		}

		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
					   tqspi->dma_buf_size,
					   DMA_FROM_DEVICE);

		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
			if (tqspi->cur_direction & DATA_DIR_TX)
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			return ret;
		}
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);

	tqspi->is_curr_dma_xfer = true;
	tqspi->dma_control_reg = val;
	val |= QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);

	return ret;
}

static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int cur_words;

	if (qspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
	else
		cur_words = qspi->curr_dma_words;

	val = QSPI_DMA_BLK_SET(cur_words - 1);
	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(qspi);

	qspi->is_curr_dma_xfer = false;
	val = qspi->command1_reg;
	val |= QSPI_PIO;
	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);

	return 0;
}

static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
	if (tqspi->tx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
		tqspi->tx_dma_buf = NULL;
	}

	if (tqspi->tx_dma_chan) {
		dma_release_channel(tqspi->tx_dma_chan);
		tqspi->tx_dma_chan = NULL;
	}

	if (tqspi->rx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
		tqspi->rx_dma_buf = NULL;
	}

	if (tqspi->rx_dma_chan) {
		dma_release_channel(tqspi->rx_dma_chan);
		tqspi->rx_dma_chan = NULL;
	}
}

static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
	struct dma_chan *dma_chan;
	dma_addr_t dma_phys;
	u32 *dma_buf;
	int err;

	dma_chan = dma_request_chan(tqspi->dev, "rx");
	if (IS_ERR(dma_chan)) {
		err = PTR_ERR(dma_chan);
		goto err_out;
	}

	tqspi->rx_dma_chan = dma_chan;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->rx_dma_buf = dma_buf;
	tqspi->rx_dma_phys = dma_phys;

	dma_chan = dma_request_chan(tqspi->dev, "tx");
	if (IS_ERR(dma_chan)) {
		err = PTR_ERR(dma_chan);
		goto err_out;
	}

	tqspi->tx_dma_chan = dma_chan;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->tx_dma_buf = dma_buf;
	tqspi->tx_dma_phys = dma_phys;
	tqspi->use_dma = true;

	return 0;

err_out:
	tegra_qspi_deinit_dma(tqspi);

	if (err != -EPROBE_DEFER) {
		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
		dev_err(tqspi->dev, "falling back to PIO\n");
		return 0;
	}

	return err;
}

static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
					 bool is_first_of_msg)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	u32 command1, command2, speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 tx_tap = 0, rx_tap = 0;
	int req_mode;

	if (speed != tqspi->cur_speed) {
		clk_set_rate(tqspi->clk, speed);
		tqspi->cur_speed = speed;
	}

	tqspi->cur_pos = 0;
	tqspi->cur_rx_pos = 0;
	tqspi->cur_tx_pos = 0;
	tqspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_qspi_mask_clear_irq(tqspi);

		command1 = tqspi->def_command1_reg;
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~QSPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_3)
			command1 |= QSPI_CONTROL_MODE_3;
		else
			command1 |= QSPI_CONTROL_MODE_0;

		if (spi->mode & SPI_CS_HIGH)
			command1 |= QSPI_CS_SW_VAL;
		else
			command1 &= ~QSPI_CS_SW_VAL;
		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

		if (cdata && cdata->tx_clk_tap_delay)
			tx_tap = cdata->tx_clk_tap_delay;

		if (cdata && cdata->rx_clk_tap_delay)
			rx_tap = cdata->rx_clk_tap_delay;

		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
		if (command2 != tqspi->def_command2_reg)
			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);

	} else {
		command1 = tqspi->command1_reg;
		command1 &= ~QSPI_BIT_LENGTH(~0);
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
	}

	command1 &= ~QSPI_SDR_DDR_SEL;

	return command1;
}

static int tegra_qspi_start_transfer_one(struct spi_device *spi,
					 struct spi_transfer *t, u32 command1)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	unsigned int total_fifo_words;
	u8 bus_width = 0;
	int ret;

	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);

	command1 &= ~QSPI_PACKED;
	if (tqspi->is_packed)
		command1 |= QSPI_PACKED;
	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

	tqspi->cur_direction = 0;

	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
	if (t->rx_buf) {
		command1 |= QSPI_RX_EN;
		tqspi->cur_direction |= DATA_DIR_RX;
		bus_width = t->rx_nbits;
	}

	if (t->tx_buf) {
		command1 |= QSPI_TX_EN;
		tqspi->cur_direction |= DATA_DIR_TX;
		bus_width = t->tx_nbits;
	}

	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;

	if (bus_width == SPI_NBITS_QUAD)
		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
	else if (bus_width == SPI_NBITS_DUAL)
		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
	else
		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;

	tqspi->command1_reg = command1;

	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);

	ret = tegra_qspi_flush_fifos(tqspi, false);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);

	return ret;
}

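/*
 * Per-device tap delays come from optional device tree properties on the
 * flash node, e.g. (a sketch with illustrative, not board-validated,
 * values):
 *
 *	flash@0 {
 *		compatible = "spi-nor";
 *		reg = <0>;
 *		nvidia,tx-clk-tap-delay = <0>;
 *		nvidia,rx-clk-tap-delay = <6>;
 *	};
 */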
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
	struct tegra_qspi_client_data *cdata;
	struct device_node *slave_np = spi->dev.of_node;

	cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
	if (!cdata)
		return NULL;

	of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
			     &cdata->tx_clk_tap_delay);
	of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
			     &cdata->rx_clk_tap_delay);
	return cdata;
}

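/*
 * Called by the SPI core for each device on the bus; programs the
 * software chip-select polarity so that CS starts out inactive.
 */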
static int tegra_qspi_setup(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	unsigned long flags;
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(tqspi->dev);
	if (ret < 0) {
		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	if (!cdata) {
		cdata = tegra_qspi_parse_cdata_dt(spi);
		spi->controller_data = cdata;
	}

	spin_lock_irqsave(&tqspi->lock, flags);

	/* keep the default CS state inactive */
	val = tqspi->def_command1_reg;
	if (spi->mode & SPI_CS_HIGH)
		val &= ~QSPI_CS_SW_VAL;
	else
		val |= QSPI_CS_SW_VAL;

	tqspi->def_command1_reg = val;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);

	spin_unlock_irqrestore(&tqspi->lock, flags);

	pm_runtime_put(tqspi->dev);

	return 0;
}

static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
	dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
	dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
	dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
	dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}

static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
	tegra_qspi_dump_regs(tqspi);
	tegra_qspi_flush_fifos(tqspi, true);
	reset_control_assert(tqspi->rst);
	udelay(2);
	reset_control_deassert(tqspi->rst);
}

static void tegra_qspi_transfer_end(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

	if (cs_val)
		tqspi->command1_reg |= QSPI_CS_SW_VAL;
	else
		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}

static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi_message *msg)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	bool is_first_msg = true;
	int ret;

	msg->status = 0;
	msg->actual_length = 0;
	tqspi->tx_status = 0;
	tqspi->rx_status = 0;

	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		struct spi_transfer *xfer = transfer;
		u8 dummy_bytes = 0;
		u32 cmd1;

		tqspi->dummy_cycles = 0;
		/*
		 * The Tegra QSPI hardware can send dummy clock cycles after
		 * the actual data, based on the number of dummy cycles
		 * programmed in the QSPI_MISC register. So, if the next
		 * transfer is a dummy-data transfer, program its dummy
		 * clock cycles along with the current transfer and skip
		 * the next transfer.
		 */
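		/*
		 * Example (illustrative): a quad-output fast read is often
		 * described as a command/address transfer followed by a
		 * dummy_data transfer; a 4-byte dummy transfer at
		 * tx_nbits = 4 works out to 4 * 8 / 4 = 8 dummy clock
		 * cycles folded into the preceding transfer.
		 */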
		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
			struct spi_transfer *next_xfer;

			next_xfer = list_next_entry(xfer, transfer_list);
			if (next_xfer->dummy_data) {
				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;

				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
					tqspi->dummy_cycles = dummy_cycles;
					dummy_bytes = next_xfer->len;
					transfer = next_xfer;
				}
			}
		}

		reinit_completion(&tqspi->xfer_completion);

		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);

		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
			goto complete_xfer;
		}

		is_first_msg = false;
		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
						  QSPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			dev_err(tqspi->dev, "transfer timeout\n");
			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
				dmaengine_terminate_all(tqspi->rx_dma_chan);
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		if (tqspi->tx_status || tqspi->rx_status) {
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		msg->actual_length += xfer->len + dummy_bytes;

complete_xfer:
		if (ret < 0) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
			goto exit;
		}

		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
			/* de-activate CS after the last transfer only when cs_change is not set */
			if (!xfer->cs_change) {
				tegra_qspi_transfer_end(spi);
				spi_transfer_delay_exec(xfer);
			}
		} else if (xfer->cs_change) {
			/* de-activate CS between transfers only when cs_change is set */
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
	}

	ret = 0;
exit:
	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);

	if (tqspi->tx_status || tqspi->rx_status) {
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned int total_fifo_words;
	unsigned long flags;
	long wait_status;
	int err = 0;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_status) {
			dmaengine_terminate_all(tqspi->tx_dma_chan);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->tx_dma_chan);
				dev_err(tqspi->dev, "failed TX DMA transfer\n");
				err += 1;
			}
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_status) {
			dmaengine_terminate_all(tqspi->rx_dma_chan);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->rx_dma_chan);
				dev_err(tqspi->dev, "failed RX DMA transfer\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tqspi->lock, flags);

	if (err) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_dma_unmap_xfer(tqspi, t);

	/* continue transfer in current message */
	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	if (total_fifo_words > QSPI_FIFO_DEPTH)
		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);

exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

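/*
 * Threaded interrupt handler: snapshot the FIFO status, latch any TX/RX
 * FIFO errors, clear the interrupt, and then hand off to the CPU-based
 * or DMA-based completion path.
 */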
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
	struct tegra_qspi *tqspi = context_data;

	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);

	if (tqspi->cur_direction & DATA_DIR_RX)
		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);

	tegra_qspi_mask_clear_irq(tqspi);

	if (!tqspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tqspi);

	return handle_dma_based_xfer(tqspi);
}

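/*
 * Minimal controller node sketch (illustrative; see the device tree
 * binding document for the authoritative example). The driver looks up
 * the "qspi" clock, an unnamed reset, and "rx"/"tx" DMA channels:
 *
 *	spi@70410000 {
 *		compatible = "nvidia,tegra210-qspi";
 *		reg = <0x70410000 0x1000>;
 *		interrupts = <...>;
 *		clocks = <&tegra_car TEGRA210_CLK_QSPI>;
 *		clock-names = "qspi";
 *		resets = <&tegra_car 211>;
 *		dma-names = "rx", "tx";
 *	};
 */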
static const struct of_device_id tegra_qspi_of_match[] = {
	{ .compatible = "nvidia,tegra210-qspi", },
	{ .compatible = "nvidia,tegra186-qspi", },
	{ .compatible = "nvidia,tegra194-qspi", },
	{}
};

MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);

static int tegra_qspi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct tegra_qspi *tqspi;
	struct resource *r;
	int ret, qspi_irq;
	int bus_num;

	master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);
	tqspi = spi_master_get_devdata(master);

	master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
			    SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	master->setup = tegra_qspi_setup;
	master->transfer_one_message = tegra_qspi_transfer_one_message;
	master->num_chipselect = 1;
	master->auto_runtime_pm = true;

	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
	if (bus_num >= 0)
		master->bus_num = bus_num;

	tqspi->master = master;
	tqspi->dev = &pdev->dev;
	spin_lock_init(&tqspi->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tqspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(tqspi->base))
		return PTR_ERR(tqspi->base);

	tqspi->phys = r->start;
	qspi_irq = platform_get_irq(pdev, 0);
	tqspi->irq = qspi_irq;

	tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
	if (IS_ERR(tqspi->clk)) {
		ret = PTR_ERR(tqspi->clk);
		dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
		return ret;
	}

	tqspi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(tqspi->rst)) {
		ret = PTR_ERR(tqspi->rst);
		dev_err(&pdev->dev, "failed to get reset control: %d\n", ret);
		return ret;
	}

	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;

	ret = tegra_qspi_init_dma(tqspi);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma)
		tqspi->max_buf_size = tqspi->dma_buf_size;

	init_completion(&tqspi->tx_dma_complete);
	init_completion(&tqspi->rx_dma_complete);
	init_completion(&tqspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
		goto exit_pm_disable;
	}

	reset_control_assert(tqspi->rst);
	udelay(2);
	reset_control_deassert(tqspi->rst);

	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);

	pm_runtime_put(&pdev->dev);

	ret = request_threaded_irq(tqspi->irq, NULL,
				   tegra_qspi_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tqspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
		goto exit_pm_disable;
	}

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register master: %d\n", ret);
		goto exit_free_irq;
	}

	return 0;

exit_free_irq:
	free_irq(qspi_irq, tqspi);
exit_pm_disable:
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
	return ret;
}

static int tegra_qspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);

	spi_unregister_master(master);
	free_irq(tqspi->irq, tqspi);
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);

	return 0;
}

static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}

static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);

	/* flush all writes in the PPSB queue by reading them back */
	tegra_qspi_readl(tqspi, QSPI_COMMAND1);

	clk_disable_unprepare(tqspi->clk);

	return 0;
}

static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(tqspi->clk);
	if (ret < 0)
		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops tegra_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
};

static struct platform_driver tegra_qspi_driver = {
	.driver = {
		.name = "tegra-qspi",
		.pm = &tegra_qspi_pm_ops,
		.of_match_table = tegra_qspi_of_match,
	},
	.probe = tegra_qspi_probe,
	.remove = tegra_qspi_remove,
};
module_platform_driver(tegra_qspi_driver);

MODULE_ALIAS("platform:qspi-tegra");
MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
MODULE_LICENSE("GPL v2");