// SPDX-License-Identifier: GPL-2.0
//
// STMicroelectronics STM32 SPI Controller driver (master mode only)
//
// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.

#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

#define DRIVER_NAME "spi_stm32"

/* STM32H7 SPI registers */
#define STM32H7_SPI_CR1 0x00
#define STM32H7_SPI_CR2 0x04
#define STM32H7_SPI_CFG1 0x08
#define STM32H7_SPI_CFG2 0x0C
#define STM32H7_SPI_IER 0x10
#define STM32H7_SPI_SR 0x14
#define STM32H7_SPI_IFCR 0x18
#define STM32H7_SPI_TXDR 0x20
#define STM32H7_SPI_RXDR 0x30
#define STM32H7_SPI_I2SCFGR 0x50

/* STM32H7_SPI_CR1 bit fields */
#define STM32H7_SPI_CR1_SPE BIT(0)
#define STM32H7_SPI_CR1_MASRX BIT(8)
#define STM32H7_SPI_CR1_CSTART BIT(9)
#define STM32H7_SPI_CR1_CSUSP BIT(10)
#define STM32H7_SPI_CR1_HDDIR BIT(11)
#define STM32H7_SPI_CR1_SSI BIT(12)

/* STM32H7_SPI_CR2 bit fields */
#define STM32H7_SPI_CR2_TSIZE_SHIFT 0
#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)

/* STM32H7_SPI_CFG1 bit fields */
#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0
#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5
#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
#define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
#define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
#define STM32H7_SPI_CFG1_MBR_SHIFT 28
#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
#define STM32H7_SPI_CFG1_MBR_MIN 0
#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)

/* STM32H7_SPI_CFG2 bit fields */
#define STM32H7_SPI_CFG2_MIDI_SHIFT 4
#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
#define STM32H7_SPI_CFG2_COMM_SHIFT 17
#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
#define STM32H7_SPI_CFG2_SP_SHIFT 19
#define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
#define STM32H7_SPI_CFG2_MASTER BIT(22)
#define STM32H7_SPI_CFG2_LSBFRST BIT(23)
#define STM32H7_SPI_CFG2_CPHA BIT(24)
#define STM32H7_SPI_CFG2_CPOL BIT(25)
#define STM32H7_SPI_CFG2_SSM BIT(26)
#define STM32H7_SPI_CFG2_AFCNTR BIT(31)

/* STM32H7_SPI_IER bit fields */
#define STM32H7_SPI_IER_RXPIE BIT(0)
#define STM32H7_SPI_IER_TXPIE BIT(1)
#define STM32H7_SPI_IER_DXPIE BIT(2)
#define STM32H7_SPI_IER_EOTIE BIT(3)
#define STM32H7_SPI_IER_TXTFIE BIT(4)
#define STM32H7_SPI_IER_OVRIE BIT(6)
#define STM32H7_SPI_IER_MODFIE BIT(9)
#define STM32H7_SPI_IER_ALL GENMASK(10, 0)

/* STM32H7_SPI_SR bit fields */
#define STM32H7_SPI_SR_RXP BIT(0)
#define STM32H7_SPI_SR_TXP BIT(1)
#define STM32H7_SPI_SR_EOT BIT(3)
#define STM32H7_SPI_SR_OVR BIT(6)
#define STM32H7_SPI_SR_MODF BIT(9)
#define STM32H7_SPI_SR_SUSP BIT(11)
#define STM32H7_SPI_SR_RXPLVL_SHIFT 13
#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
#define STM32H7_SPI_SR_RXWNE BIT(15)

/* STM32H7_SPI_IFCR bit fields */
#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3)

/* STM32H7_SPI_I2SCFGR bit fields */
#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)

/* STM32H7 SPI Master Baud Rate min/max divisor */
#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)

/* STM32H7 SPI Communication mode */
#define STM32H7_SPI_FULL_DUPLEX 0
#define STM32H7_SPI_SIMPLEX_TX 1
#define STM32H7_SPI_SIMPLEX_RX 2
#define STM32H7_SPI_HALF_DUPLEX 3

/* SPI Communication type */
#define SPI_FULL_DUPLEX 0
#define SPI_SIMPLEX_TX 1
#define SPI_SIMPLEX_RX 2
#define SPI_3WIRE_TX 3
#define SPI_3WIRE_RX 4

#define SPI_1HZ_NS 1000000000

/**
 * struct stm32_spi - private data of the SPI controller
 * @dev: driver model representation of the controller
 * @master: controller master interface
 * @base: virtual memory area
 * @clk: hw kernel clock feeding the SPI clock generator
 * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
 * @rst: SPI controller reset line
 * @lock: prevent I/O concurrent access
 * @irq: SPI controller interrupt line
 * @fifo_size: size of the embedded fifo in bytes
 * @cur_midi: master inter-data idleness in ns
 * @cur_speed: speed configured in Hz
 * @cur_bpw: number of bits in a single SPI data frame
 * @cur_fthlv: fifo threshold level (data frames in a single data packet)
 * @cur_comm: SPI communication mode
 * @cur_xferlen: current transfer length in bytes
 * @cur_usedma: boolean to know if dma is used in current transfer
 * @tx_buf: data to be written, or NULL
 * @rx_buf: data to be read, or NULL
 * @tx_len: number of data to be written in bytes
 * @rx_len: number of data to be read in bytes
 * @dma_tx: dma channel for TX transfer
 * @dma_rx: dma channel for RX transfer
 * @phys_addr: SPI registers physical base address
 */
struct stm32_spi {
        struct device *dev;
        struct spi_master *master;
        void __iomem *base;
        struct clk *clk;
        u32 clk_rate;
        struct reset_control *rst;
        spinlock_t lock; /* prevent I/O concurrent access */
        int irq;
        unsigned int fifo_size;

        unsigned int cur_midi;
        unsigned int cur_speed;
        unsigned int cur_bpw;
        unsigned int cur_fthlv;
        unsigned int cur_comm;
        unsigned int cur_xferlen;
        bool cur_usedma;

        const void *tx_buf;
        void *rx_buf;
        int tx_len;
        int rx_len;
        struct dma_chan *dma_tx;
        struct dma_chan *dma_rx;
        dma_addr_t phys_addr;
};

static inline void stm32_spi_set_bits(struct stm32_spi *spi,
                                      u32 offset, u32 bits)
{
        writel_relaxed(readl_relaxed(spi->base + offset) | bits,
                       spi->base + offset);
}

static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
                                      u32 offset, u32 bits)
{
        writel_relaxed(readl_relaxed(spi->base + offset) & ~bits,
                       spi->base + offset);
}

/**
 * stm32_spi_get_fifo_size - Return fifo size
 * @spi: pointer to the spi controller data structure
 */
static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
{
        unsigned long flags;
        u32 count = 0;

        spin_lock_irqsave(&spi->lock, flags);

        stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);

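        /*
         * Fill the Tx FIFO byte by byte: TXP stays set while there is room,
         * so the number of accepted writes gives the FIFO depth in bytes.
         */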
        while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP)
                writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR);

        stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);

        spin_unlock_irqrestore(&spi->lock, flags);

        dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);

        return count;
}

/**
 * stm32_spi_get_bpw_mask - Return bits per word mask
 * @spi: pointer to the spi controller data structure
 */
static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
{
        unsigned long flags;
        u32 cfg1, max_bpw;

        spin_lock_irqsave(&spi->lock, flags);

        /*
         * The most significant bit of the DSIZE bit field is reserved when
         * the maximum data size of the peripheral instance is limited to
         * 16 bits.
         */
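        /*
         * Write all ones to DSIZE and read the value back: the reserved bits
         * read as zero, so the result is the maximum frame size minus one.
         */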
        stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);

        cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
        max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >>
                  STM32H7_SPI_CFG1_DSIZE_SHIFT;
        max_bpw += 1;

        spin_unlock_irqrestore(&spi->lock, flags);

        dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);

        return SPI_BPW_RANGE_MASK(4, max_bpw);
}

/**
 * stm32_spi_prepare_mbr - Determine baud rate divisor value
 * @spi: pointer to the spi controller data structure
 * @speed_hz: requested speed
 * @min_div: minimum baud rate divisor
 * @max_div: maximum baud rate divisor
 *
 * Return baud rate divisor value in case of success or -EINVAL
 */
static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
                                 u32 min_div, u32 max_div)
{
        u32 div, mbrdiv;

        div = DIV_ROUND_UP(spi->clk_rate, speed_hz);

        /*
         * The SPI framework sets xfer->speed_hz to master->max_speed_hz if
         * xfer->speed_hz is greater than master->max_speed_hz, and it returns
         * an error when xfer->speed_hz is lower than master->min_speed_hz, so
         * there is no need to check those bounds here.
         * However, we still need to make sure the resulting divisor fits in
         * the hardware range checked below.
         */
        if ((div < min_div) || (div > max_div))
                return -EINVAL;

        /* Determine the first power of 2 greater than or equal to div */
        if (div & (div - 1))
                mbrdiv = fls(div);
        else
                mbrdiv = fls(div) - 1;

        spi->cur_speed = spi->clk_rate / (1 << mbrdiv);

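        /* The MBR field encodes a divisor of 2^(MBR + 1), hence the minus one */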
        return mbrdiv - 1;
}

/**
 * stm32_spi_prepare_fthlv - Determine FIFO threshold level
 * @spi: pointer to the spi controller data structure
 */
static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
{
        u32 fthlv, half_fifo;

        /* data packet should not exceed 1/2 of fifo space */
        half_fifo = (spi->fifo_size / 2);

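        /*
         * The threshold is expressed in data frames, so wider frames need
         * fewer of them to fill half of the FIFO.
         */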
        if (spi->cur_bpw <= 8)
                fthlv = half_fifo;
        else if (spi->cur_bpw <= 16)
                fthlv = half_fifo / 2;
        else
                fthlv = half_fifo / 4;

        /* align packet size with data registers access */
        if (spi->cur_bpw > 8)
                fthlv -= (fthlv % 2); /* multiple of 2 */
        else
                fthlv -= (fthlv % 4); /* multiple of 4 */

        return fthlv;
}

/**
 * stm32_spi_write_txfifo - Write bytes in Transmit Data Register
 * @spi: pointer to the spi controller data structure
 *
 * Read from tx_buf depends on remaining bytes to avoid reading beyond
 * tx_buf end.
 */
static void stm32_spi_write_txfifo(struct stm32_spi *spi)
{
        while ((spi->tx_len > 0) &&
               (readl_relaxed(spi->base + STM32H7_SPI_SR) &
                STM32H7_SPI_SR_TXP)) {
                u32 offs = spi->cur_xferlen - spi->tx_len;

                if (spi->tx_len >= sizeof(u32)) {
                        const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);

                        writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR);
                        spi->tx_len -= sizeof(u32);
                } else if (spi->tx_len >= sizeof(u16)) {
                        const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);

                        writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR);
                        spi->tx_len -= sizeof(u16);
                } else {
                        const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);

                        writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR);
                        spi->tx_len -= sizeof(u8);
                }
        }

        dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
}

/**
 * stm32_spi_read_rxfifo - Read bytes in Receive Data Register
 * @spi: pointer to the spi controller data structure
 * @flush: also drain the bytes left in the FIFO, typically before disabling
 *         the controller
 *
 * Write in rx_buf depends on remaining bytes to avoid writing beyond
 * rx_buf end.
 */
static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
{
        u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
        u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
                     STM32H7_SPI_SR_RXPLVL_SHIFT;

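        /*
         * When flushing, RXWNE and RXPLVL report how much data is left in the
         * FIFO even though RXP may already have dropped below the threshold.
         */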
        while ((spi->rx_len > 0) &&
               ((sr & STM32H7_SPI_SR_RXP) ||
                (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
                u32 offs = spi->cur_xferlen - spi->rx_len;

                if ((spi->rx_len >= sizeof(u32)) ||
                    (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
                        u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);

                        *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
                        spi->rx_len -= sizeof(u32);
                } else if ((spi->rx_len >= sizeof(u16)) ||
                           (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
                        u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);

                        *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
                        spi->rx_len -= sizeof(u16);
                } else {
                        u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);

                        *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR);
                        spi->rx_len -= sizeof(u8);
                }

                sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
                rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
                         STM32H7_SPI_SR_RXPLVL_SHIFT;
        }

        dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
                flush ? "(flush)" : "", spi->rx_len);
}

/**
 * stm32_spi_enable - Enable SPI controller
 * @spi: pointer to the spi controller data structure
 *
 * SPI data transfer is enabled but spi_ker_ck is idle.
 * SPI_CFG1 and SPI_CFG2 are now write protected.
 */
static void stm32_spi_enable(struct stm32_spi *spi)
{
        dev_dbg(spi->dev, "enable controller\n");

        stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
}

/**
 * stm32_spi_disable - Disable SPI controller
 * @spi: pointer to the spi controller data structure
 *
 * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
 * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in
 * RX-Fifo.
 */
static void stm32_spi_disable(struct stm32_spi *spi)
{
        unsigned long flags;
        u32 cr1, sr;

        dev_dbg(spi->dev, "disable controller\n");

        spin_lock_irqsave(&spi->lock, flags);

        cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1);

        if (!(cr1 & STM32H7_SPI_CR1_SPE)) {
                spin_unlock_irqrestore(&spi->lock, flags);
                return;
        }

        /* Wait on EOT or suspend the flow */
        if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
                                              sr, !(sr & STM32H7_SPI_SR_EOT),
                                              10, 100000) < 0) {
                if (cr1 & STM32H7_SPI_CR1_CSTART) {
                        writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
                                       spi->base + STM32H7_SPI_CR1);
                        if (readl_relaxed_poll_timeout_atomic(
                                                spi->base + STM32H7_SPI_SR,
                                                sr, !(sr & STM32H7_SPI_SR_SUSP),
                                                10, 100000) < 0)
                                dev_warn(spi->dev,
                                         "Suspend request timeout\n");
                }
        }

        if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
                stm32_spi_read_rxfifo(spi, true);

        if (spi->cur_usedma && spi->dma_tx)
                dmaengine_terminate_all(spi->dma_tx);
        if (spi->cur_usedma && spi->dma_rx)
                dmaengine_terminate_all(spi->dma_rx);

        stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);

        stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN |
                                                  STM32H7_SPI_CFG1_RXDMAEN);

        /* Disable interrupts and clear status flags */
        writel_relaxed(0, spi->base + STM32H7_SPI_IER);
        writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR);

        spin_unlock_irqrestore(&spi->lock, flags);
}

/**
 * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
 *
 * If the current transfer size is greater than fifo size, use DMA.
 */
static bool stm32_spi_can_dma(struct spi_master *master,
                              struct spi_device *spi_dev,
                              struct spi_transfer *transfer)
{
        struct stm32_spi *spi = spi_master_get_devdata(master);

        dev_dbg(spi->dev, "%s: %s\n", __func__,
                (transfer->len > spi->fifo_size) ? "true" : "false");

        return (transfer->len > spi->fifo_size);
}

/**
 * stm32_spi_irq_thread - Thread of interrupt handler for SPI controller
 * @irq: interrupt line
 * @dev_id: SPI controller master interface
 */
static irqreturn_t stm32_spi_irq_thread(int irq, void *dev_id)
{
        struct spi_master *master = dev_id;
        struct stm32_spi *spi = spi_master_get_devdata(master);
        u32 sr, ier, mask;
        unsigned long flags;
        bool end = false;

        spin_lock_irqsave(&spi->lock, flags);

        sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
        ier = readl_relaxed(spi->base + STM32H7_SPI_IER);

        mask = ier;
        /* EOTIE is triggered on EOT, SUSP and TXC events. */
        mask |= STM32H7_SPI_SR_SUSP;
        /*
         * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
         * Full-Duplex, need to poll RXP event to know if there are remaining
         * data, before disabling SPI.
         */
        if (spi->rx_buf && !spi->cur_usedma)
                mask |= STM32H7_SPI_SR_RXP;

        if (!(sr & mask)) {
                dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
                        sr, ier);
                spin_unlock_irqrestore(&spi->lock, flags);
                return IRQ_NONE;
        }

        if (sr & STM32H7_SPI_SR_SUSP) {
                dev_warn(spi->dev, "Communication suspended\n");
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
                        stm32_spi_read_rxfifo(spi, false);
                /*
                 * If communication is suspended while using DMA, it means
                 * that something went wrong, so stop the current transfer
                 */
                if (spi->cur_usedma)
                        end = true;
        }

        if (sr & STM32H7_SPI_SR_MODF) {
                dev_warn(spi->dev, "Mode fault: transfer aborted\n");
                end = true;
        }

        if (sr & STM32H7_SPI_SR_OVR) {
                dev_warn(spi->dev, "Overrun: received value discarded\n");
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
                        stm32_spi_read_rxfifo(spi, false);
                /*
                 * If overrun is detected while using DMA, it means that
                 * something went wrong, so stop the current transfer
                 */
                if (spi->cur_usedma)
                        end = true;
        }

        if (sr & STM32H7_SPI_SR_EOT) {
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
                        stm32_spi_read_rxfifo(spi, true);
                end = true;
        }

        if (sr & STM32H7_SPI_SR_TXP)
                if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
                        stm32_spi_write_txfifo(spi);

        if (sr & STM32H7_SPI_SR_RXP)
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
                        stm32_spi_read_rxfifo(spi, false);

        writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);

        spin_unlock_irqrestore(&spi->lock, flags);

        if (end) {
                spi_finalize_current_transfer(master);
                stm32_spi_disable(spi);
        }

        return IRQ_HANDLED;
}

/**
 * stm32_spi_setup - setup device chip select
 */
static int stm32_spi_setup(struct spi_device *spi_dev)
{
        int ret = 0;

        if (!gpio_is_valid(spi_dev->cs_gpio)) {
                dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
                        spi_dev->cs_gpio);
                return -EINVAL;
        }

        dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
                spi_dev->cs_gpio,
                (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");

        ret = gpio_direction_output(spi_dev->cs_gpio,
                                    !(spi_dev->mode & SPI_CS_HIGH));

        return ret;
}

/**
 * stm32_spi_prepare_msg - set up the controller to transfer a single message
 */
static int stm32_spi_prepare_msg(struct spi_master *master,
                                 struct spi_message *msg)
{
        struct stm32_spi *spi = spi_master_get_devdata(master);
        struct spi_device *spi_dev = msg->spi;
        struct device_node *np = spi_dev->dev.of_node;
        unsigned long flags;
        u32 cfg2_clrb = 0, cfg2_setb = 0;

        /* SPI slave device may need time between data frames */
        spi->cur_midi = 0;
        if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
                dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);

        if (spi_dev->mode & SPI_CPOL)
                cfg2_setb |= STM32H7_SPI_CFG2_CPOL;
        else
                cfg2_clrb |= STM32H7_SPI_CFG2_CPOL;

        if (spi_dev->mode & SPI_CPHA)
                cfg2_setb |= STM32H7_SPI_CFG2_CPHA;
        else
                cfg2_clrb |= STM32H7_SPI_CFG2_CPHA;

        if (spi_dev->mode & SPI_LSB_FIRST)
                cfg2_setb |= STM32H7_SPI_CFG2_LSBFRST;
        else
                cfg2_clrb |= STM32H7_SPI_CFG2_LSBFRST;

        dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
                spi_dev->mode & SPI_CPOL,
                spi_dev->mode & SPI_CPHA,
                spi_dev->mode & SPI_LSB_FIRST,
                spi_dev->mode & SPI_CS_HIGH);

        spin_lock_irqsave(&spi->lock, flags);

        if (cfg2_clrb || cfg2_setb)
                writel_relaxed(
                        (readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
                         ~cfg2_clrb) | cfg2_setb,
                        spi->base + STM32H7_SPI_CFG2);

        spin_unlock_irqrestore(&spi->lock, flags);

        return 0;
}

/**
 * stm32_spi_dma_cb - dma callback
 *
 * DMA callback is called when the transfer is complete or when an error
 * occurs. If the transfer is complete, EOT flag is raised.
 */
static void stm32_spi_dma_cb(void *data)
{
        struct stm32_spi *spi = data;
        unsigned long flags;
        u32 sr;

        spin_lock_irqsave(&spi->lock, flags);

        sr = readl_relaxed(spi->base + STM32H7_SPI_SR);

        spin_unlock_irqrestore(&spi->lock, flags);

        if (!(sr & STM32H7_SPI_SR_EOT))
                dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);

        /* Now wait for EOT, or SUSP or OVR in case of error */
}

/**
 * stm32_spi_dma_config - configure dma slave channel depending on current
 *                        transfer bits_per_word.
 */
static void stm32_spi_dma_config(struct stm32_spi *spi,
                                 struct dma_slave_config *dma_conf,
                                 enum dma_transfer_direction dir)
{
        enum dma_slave_buswidth buswidth;
        u32 maxburst;

        if (spi->cur_bpw <= 8)
                buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
        else if (spi->cur_bpw <= 16)
                buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
        else
                buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

        /* Valid for DMA Half or Full Fifo threshold */
        if (spi->cur_fthlv == 2)
                maxburst = 1;
        else
                maxburst = spi->cur_fthlv;

        memset(dma_conf, 0, sizeof(struct dma_slave_config));
        dma_conf->direction = dir;
        if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
                dma_conf->src_addr = spi->phys_addr + STM32H7_SPI_RXDR;
                dma_conf->src_addr_width = buswidth;
                dma_conf->src_maxburst = maxburst;

                dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
                        buswidth, maxburst);
        } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
                dma_conf->dst_addr = spi->phys_addr + STM32H7_SPI_TXDR;
                dma_conf->dst_addr_width = buswidth;
                dma_conf->dst_maxburst = maxburst;

                dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
                        buswidth, maxburst);
        }
}

/**
 * stm32_spi_transfer_one_irq - transfer a single spi_transfer using
 *                              interrupts
 *
 * It must return 0 if the transfer is finished or 1 if the transfer is still
 * in progress.
 */
static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
{
        unsigned long flags;
        u32 ier = 0;

        /* Enable the interrupts relative to the current communication mode */
        if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
                ier |= STM32H7_SPI_IER_DXPIE;
        else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
                ier |= STM32H7_SPI_IER_TXPIE;
        else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
                ier |= STM32H7_SPI_IER_RXPIE;

        /* Enable the interrupts relative to the end of transfer */
        ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE |
               STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;

        spin_lock_irqsave(&spi->lock, flags);

        stm32_spi_enable(spi);

        /* Be sure to have data in fifo before starting data transfer */
        if (spi->tx_buf)
                stm32_spi_write_txfifo(spi);

        stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);

        writel_relaxed(ier, spi->base + STM32H7_SPI_IER);

        spin_unlock_irqrestore(&spi->lock, flags);

        return 1;
}

/**
 * stm32_spi_transfer_one_dma_start - Set SPI driver registers to start
 *                                    transfer using DMA
 */
static void stm32_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
        /* Enable the interrupts relative to the end of transfer */
        stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
                                                 STM32H7_SPI_IER_TXTFIE |
                                                 STM32H7_SPI_IER_OVRIE |
                                                 STM32H7_SPI_IER_MODFIE);

        stm32_spi_enable(spi);

        stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}

/**
 * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
 *
 * It must return 0 if the transfer is finished or 1 if the transfer is still
 * in progress.
 */
static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
                                      struct spi_transfer *xfer)
{
        struct dma_slave_config tx_dma_conf, rx_dma_conf;
        struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
        unsigned long flags;

        spin_lock_irqsave(&spi->lock, flags);

        rx_dma_desc = NULL;
        if (spi->rx_buf && spi->dma_rx) {
                stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
                dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);

                /* Enable Rx DMA request */
                stm32_spi_set_bits(spi, STM32H7_SPI_CFG1,
                                   STM32H7_SPI_CFG1_RXDMAEN);

                rx_dma_desc = dmaengine_prep_slave_sg(
                                        spi->dma_rx, xfer->rx_sg.sgl,
                                        xfer->rx_sg.nents,
                                        rx_dma_conf.direction,
                                        DMA_PREP_INTERRUPT);
        }

        tx_dma_desc = NULL;
        if (spi->tx_buf && spi->dma_tx) {
                stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
                dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);

                tx_dma_desc = dmaengine_prep_slave_sg(
                                        spi->dma_tx, xfer->tx_sg.sgl,
                                        xfer->tx_sg.nents,
                                        tx_dma_conf.direction,
                                        DMA_PREP_INTERRUPT);
        }

        if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) ||
            (spi->rx_buf && spi->dma_rx && !rx_dma_desc))
                goto dma_desc_error;

        if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc))
                goto dma_desc_error;

        if (rx_dma_desc) {
                rx_dma_desc->callback = stm32_spi_dma_cb;
                rx_dma_desc->callback_param = spi;

                if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
                        dev_err(spi->dev, "Rx DMA submit failed\n");
                        goto dma_desc_error;
                }
                /* Enable Rx DMA channel */
                dma_async_issue_pending(spi->dma_rx);
        }

        if (tx_dma_desc) {
                if (spi->cur_comm == SPI_SIMPLEX_TX ||
                    spi->cur_comm == SPI_3WIRE_TX) {
                        tx_dma_desc->callback = stm32_spi_dma_cb;
                        tx_dma_desc->callback_param = spi;
                }

                if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
                        dev_err(spi->dev, "Tx DMA submit failed\n");
                        goto dma_submit_error;
                }
                /* Enable Tx DMA channel */
                dma_async_issue_pending(spi->dma_tx);

                /* Enable Tx DMA request */
                stm32_spi_set_bits(spi, STM32H7_SPI_CFG1,
                                   STM32H7_SPI_CFG1_TXDMAEN);
        }

        stm32_spi_transfer_one_dma_start(spi);

        spin_unlock_irqrestore(&spi->lock, flags);

        return 1;

dma_submit_error:
        if (spi->dma_rx)
                dmaengine_terminate_all(spi->dma_rx);

dma_desc_error:
        stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN);

        spin_unlock_irqrestore(&spi->lock, flags);

        dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");

        spi->cur_usedma = false;
        return stm32_spi_transfer_one_irq(spi);
}

/**
 * stm32_spi_set_bpw - configure bits per word
 * @spi: pointer to the spi controller data structure
 */
static void stm32_spi_set_bpw(struct stm32_spi *spi)
{
        u32 bpw, fthlv;
        u32 cfg1_clrb = 0, cfg1_setb = 0;

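        /* DSIZE and FTHLV hold the frame size and threshold, each minus one */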
        bpw = spi->cur_bpw - 1;

        cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
        cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
                     STM32H7_SPI_CFG1_DSIZE;

        spi->cur_fthlv = stm32_spi_prepare_fthlv(spi);
        fthlv = spi->cur_fthlv - 1;

        cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
        cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) &
                     STM32H7_SPI_CFG1_FTHLV;

        writel_relaxed(
                (readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
                 ~cfg1_clrb) | cfg1_setb,
                spi->base + STM32H7_SPI_CFG1);
}

/**
 * stm32_spi_set_mbr - Configure baud rate divisor in master mode
 * @spi: pointer to the spi controller data structure
 * @mbrdiv: baud rate divisor value
 */
static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
{
        u32 cfg1_clrb = 0, cfg1_setb = 0;

        cfg1_clrb |= STM32H7_SPI_CFG1_MBR;
        cfg1_setb |= ((u32)mbrdiv << STM32H7_SPI_CFG1_MBR_SHIFT) &
                     STM32H7_SPI_CFG1_MBR;

        writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
                        ~cfg1_clrb) | cfg1_setb,
                       spi->base + STM32H7_SPI_CFG1);
}

/**
 * stm32_spi_communication_type - return transfer communication type
 * @spi_dev: pointer to the spi device
 * @transfer: pointer to spi transfer
 */
static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
                                                 struct spi_transfer *transfer)
{
        unsigned int type = SPI_FULL_DUPLEX;

        if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
                /*
                 * SPI_3WIRE with both xfer->tx_buf and xfer->rx_buf set is
                 * forbidden and fails validation in the SPI subsystem, so
                 * whichever buffer is valid determines the direction of the
                 * transfer.
                 */
                if (!transfer->tx_buf)
                        type = SPI_3WIRE_RX;
                else
                        type = SPI_3WIRE_TX;
        } else {
                if (!transfer->tx_buf)
                        type = SPI_SIMPLEX_RX;
                else if (!transfer->rx_buf)
                        type = SPI_SIMPLEX_TX;
        }

        return type;
}

/**
 * stm32_spi_set_mode - configure communication mode
 * @spi: pointer to the spi controller data structure
 * @comm_type: type of communication to configure
 */
static int stm32_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
{
        u32 mode;
        u32 cfg2_clrb = 0, cfg2_setb = 0;

        if (comm_type == SPI_3WIRE_RX) {
                mode = STM32H7_SPI_HALF_DUPLEX;
                stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
        } else if (comm_type == SPI_3WIRE_TX) {
                mode = STM32H7_SPI_HALF_DUPLEX;
                stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
        } else if (comm_type == SPI_SIMPLEX_RX) {
                mode = STM32H7_SPI_SIMPLEX_RX;
        } else if (comm_type == SPI_SIMPLEX_TX) {
                mode = STM32H7_SPI_SIMPLEX_TX;
        } else {
                mode = STM32H7_SPI_FULL_DUPLEX;
        }

        cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
        cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) &
                     STM32H7_SPI_CFG2_COMM;

        writel_relaxed(
                (readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
                 ~cfg2_clrb) | cfg2_setb,
                spi->base + STM32H7_SPI_CFG2);

        return 0;
}

/**
 * stm32_spi_data_idleness - configure minimum time delay inserted between two
 *                           consecutive data frames in master mode
 * @spi: pointer to the spi controller data structure
 * @len: transfer len
 */
static void stm32_spi_data_idleness(struct stm32_spi *spi, u32 len)
{
        u32 cfg2_clrb = 0, cfg2_setb = 0;

        cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
        if ((len > 1) && (spi->cur_midi > 0)) {
                u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
                u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
                               (u32)STM32H7_SPI_CFG2_MIDI >>
                               STM32H7_SPI_CFG2_MIDI_SHIFT);

                dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
                        sck_period_ns, midi, midi * sck_period_ns);
                cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) &
                             STM32H7_SPI_CFG2_MIDI;
        }

        writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
                        ~cfg2_clrb) | cfg2_setb,
                       spi->base + STM32H7_SPI_CFG2);
}

/**
 * stm32_spi_number_of_data - configure number of data at current transfer
 * @spi: pointer to the spi controller data structure
 * @nb_words: transfer length (in number of data frames)
 */
static int stm32_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
{
        u32 cr2_clrb = 0, cr2_setb = 0;

        if (nb_words <= (STM32H7_SPI_CR2_TSIZE >>
                         STM32H7_SPI_CR2_TSIZE_SHIFT)) {
                cr2_clrb |= STM32H7_SPI_CR2_TSIZE;
                cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT;
                writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) &
                                ~cr2_clrb) | cr2_setb,
                               spi->base + STM32H7_SPI_CR2);
        } else {
                return -EMSGSIZE;
        }

        return 0;
}

/**
 * stm32_spi_transfer_one_setup - common setup to transfer a single
 *                                spi_transfer either using DMA or
 *                                interrupts.
 */
static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
                                        struct spi_device *spi_dev,
                                        struct spi_transfer *transfer)
{
        unsigned long flags;
        unsigned int comm_type;
        int nb_words, ret = 0;

        spin_lock_irqsave(&spi->lock, flags);

        if (spi->cur_bpw != transfer->bits_per_word) {
                spi->cur_bpw = transfer->bits_per_word;
                stm32_spi_set_bpw(spi);
        }

        if (spi->cur_speed != transfer->speed_hz) {
                int mbr;

                /* Update spi->cur_speed with real clock speed */
                mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
                                            STM32H7_SPI_MBR_DIV_MIN,
                                            STM32H7_SPI_MBR_DIV_MAX);
                if (mbr < 0) {
                        ret = mbr;
                        goto out;
                }

                transfer->speed_hz = spi->cur_speed;
                stm32_spi_set_mbr(spi, mbr);
        }

        comm_type = stm32_spi_communication_type(spi_dev, transfer);
        if (spi->cur_comm != comm_type) {
                ret = stm32_spi_set_mode(spi, comm_type);

                if (ret < 0)
                        goto out;

                spi->cur_comm = comm_type;
        }

        stm32_spi_data_idleness(spi, transfer->len);

        if (spi->cur_bpw <= 8)
                nb_words = transfer->len;
        else if (spi->cur_bpw <= 16)
                nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
        else
                nb_words = DIV_ROUND_UP(transfer->len * 8, 32);

        ret = stm32_spi_number_of_data(spi, nb_words);
        if (ret < 0)
                goto out;

        spi->cur_xferlen = transfer->len;

        dev_dbg(spi->dev, "transfer communication mode set to %d\n",
                spi->cur_comm);
        dev_dbg(spi->dev,
                "data frame of %d-bit, data packet of %d data frames\n",
                spi->cur_bpw, spi->cur_fthlv);
        dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
        dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
                spi->cur_xferlen, nb_words);
        dev_dbg(spi->dev, "dma %s\n",
                (spi->cur_usedma) ? "enabled" : "disabled");

out:
        spin_unlock_irqrestore(&spi->lock, flags);

        return ret;
}

/**
 * stm32_spi_transfer_one - transfer a single spi_transfer
 *
 * It must return 0 if the transfer is finished or 1 if the transfer is still
 * in progress.
 */
static int stm32_spi_transfer_one(struct spi_master *master,
                                  struct spi_device *spi_dev,
                                  struct spi_transfer *transfer)
{
        struct stm32_spi *spi = spi_master_get_devdata(master);
        int ret;

        spi->tx_buf = transfer->tx_buf;
        spi->rx_buf = transfer->rx_buf;
        spi->tx_len = spi->tx_buf ? transfer->len : 0;
        spi->rx_len = spi->rx_buf ? transfer->len : 0;

        spi->cur_usedma = (master->can_dma &&
                           master->can_dma(master, spi_dev, transfer));

        ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
        if (ret) {
                dev_err(spi->dev, "SPI transfer setup failed\n");
                return ret;
        }

        if (spi->cur_usedma)
                return stm32_spi_transfer_one_dma(spi, transfer);
        else
                return stm32_spi_transfer_one_irq(spi);
}

/**
 * stm32_spi_unprepare_msg - relax the hardware
 *
 * Normally, if TSIZE has been configured, we should relax the hardware at the
 * reception of the EOT interrupt. But in case of error, EOT will not be
 * raised. So the subsystem unprepare_message call allows us to properly
 * complete the transfer from a hardware point of view.
 */
static int stm32_spi_unprepare_msg(struct spi_master *master,
                                   struct spi_message *msg)
{
        struct stm32_spi *spi = spi_master_get_devdata(master);

        stm32_spi_disable(spi);

        return 0;
}

/**
 * stm32_spi_config - Configure SPI controller as SPI master
 */
static int stm32_spi_config(struct stm32_spi *spi)
{
        unsigned long flags;

        spin_lock_irqsave(&spi->lock, flags);

        /* Ensure I2SMOD bit is kept cleared */
        stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
                           STM32H7_SPI_I2SCFGR_I2SMOD);

        /*
         * - SS input value high
         * - transmitter half duplex direction
         * - automatic communication suspend when RX-Fifo is full
         */
        stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI |
                                                 STM32H7_SPI_CR1_HDDIR |
                                                 STM32H7_SPI_CR1_MASRX);

        /*
         * - Set the master mode (default Motorola mode)
         * - Consider 1 master/n slaves configuration and
         *   SS input value is determined by the SSI bit
         * - keep control of all associated GPIOs
         */
        stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER |
                                                  STM32H7_SPI_CFG2_SSM |
                                                  STM32H7_SPI_CFG2_AFCNTR);

        spin_unlock_irqrestore(&spi->lock, flags);

        return 0;
}

static const struct of_device_id stm32_spi_of_match[] = {
        { .compatible = "st,stm32h7-spi", },
        {},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);

static int stm32_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct stm32_spi *spi;
        struct resource *res;
        int i, ret;

        master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
        if (!master) {
                dev_err(&pdev->dev, "spi master allocation failed\n");
                return -ENOMEM;
        }
        platform_set_drvdata(pdev, master);

        spi = spi_master_get_devdata(master);
        spi->dev = &pdev->dev;
        spi->master = master;
        spin_lock_init(&spi->lock);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        spi->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(spi->base)) {
                ret = PTR_ERR(spi->base);
                goto err_master_put;
        }
        spi->phys_addr = (dma_addr_t)res->start;

        spi->irq = platform_get_irq(pdev, 0);
        if (spi->irq <= 0) {
                dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
                ret = -ENOENT;
                goto err_master_put;
        }
        ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL,
                                        stm32_spi_irq_thread, IRQF_ONESHOT,
                                        pdev->name, master);
        if (ret) {
                dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
                        ret);
                goto err_master_put;
        }

        spi->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(spi->clk)) {
                ret = PTR_ERR(spi->clk);
                dev_err(&pdev->dev, "clk get failed: %d\n", ret);
                goto err_master_put;
        }

        ret = clk_prepare_enable(spi->clk);
        if (ret) {
                dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
                goto err_master_put;
        }
        spi->clk_rate = clk_get_rate(spi->clk);
        if (!spi->clk_rate) {
                dev_err(&pdev->dev, "clk rate = 0\n");
                ret = -EINVAL;
                goto err_clk_disable;
        }

        spi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
        if (!IS_ERR(spi->rst)) {
                reset_control_assert(spi->rst);
                udelay(2);
                reset_control_deassert(spi->rst);
        }

        spi->fifo_size = stm32_spi_get_fifo_size(spi);

        ret = stm32_spi_config(spi);
        if (ret) {
                dev_err(&pdev->dev, "controller configuration failed: %d\n",
                        ret);
                goto err_clk_disable;
        }

        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
                            SPI_3WIRE;
        master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi);
        master->max_speed_hz = spi->clk_rate / STM32H7_SPI_MBR_DIV_MIN;
        master->min_speed_hz = spi->clk_rate / STM32H7_SPI_MBR_DIV_MAX;
        master->setup = stm32_spi_setup;
        master->prepare_message = stm32_spi_prepare_msg;
        master->transfer_one = stm32_spi_transfer_one;
        master->unprepare_message = stm32_spi_unprepare_msg;

        spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
        if (!spi->dma_tx)
                dev_warn(&pdev->dev, "failed to request tx dma channel\n");
        else
                master->dma_tx = spi->dma_tx;

        spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
        if (!spi->dma_rx)
                dev_warn(&pdev->dev, "failed to request rx dma channel\n");
        else
                master->dma_rx = spi->dma_rx;

        if (spi->dma_tx || spi->dma_rx)
                master->can_dma = stm32_spi_can_dma;

        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = devm_spi_register_master(&pdev->dev, master);
        if (ret) {
                dev_err(&pdev->dev, "spi master registration failed: %d\n",
                        ret);
                goto err_dma_release;
        }

        if (!master->cs_gpios) {
                dev_err(&pdev->dev, "no CS gpios available\n");
                ret = -EINVAL;
                goto err_dma_release;
        }

        for (i = 0; i < master->num_chipselect; i++) {
                if (!gpio_is_valid(master->cs_gpios[i])) {
                        dev_err(&pdev->dev, "%i is not a valid gpio\n",
                                master->cs_gpios[i]);
                        ret = -EINVAL;
                        goto err_dma_release;
                }

                ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
                                        DRIVER_NAME);
                if (ret) {
                        dev_err(&pdev->dev, "can't get CS gpio %i\n",
                                master->cs_gpios[i]);
                        goto err_dma_release;
                }
        }

        dev_info(&pdev->dev, "driver initialized\n");

        return 0;

err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
        if (spi->dma_rx)
                dma_release_channel(spi->dma_rx);

        pm_runtime_disable(&pdev->dev);
err_clk_disable:
        clk_disable_unprepare(spi->clk);
err_master_put:
        spi_master_put(master);

        return ret;
}

static int stm32_spi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct stm32_spi *spi = spi_master_get_devdata(master);

        stm32_spi_disable(spi);

        if (master->dma_tx)
                dma_release_channel(master->dma_tx);
        if (master->dma_rx)
                dma_release_channel(master->dma_rx);

        clk_disable_unprepare(spi->clk);

        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM
static int stm32_spi_runtime_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct stm32_spi *spi = spi_master_get_devdata(master);

        clk_disable_unprepare(spi->clk);

        return 0;
}

static int stm32_spi_runtime_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct stm32_spi *spi = spi_master_get_devdata(master);

        return clk_prepare_enable(spi->clk);
}
#endif

#ifdef CONFIG_PM_SLEEP
static int stm32_spi_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        return pm_runtime_force_suspend(dev);
}

static int stm32_spi_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct stm32_spi *spi = spi_master_get_devdata(master);
        int ret;

        ret = pm_runtime_force_resume(dev);
        if (ret)
                return ret;

        ret = spi_master_resume(master);
        if (ret)
                clk_disable_unprepare(spi->clk);

        return ret;
}
#endif

static const struct dev_pm_ops stm32_spi_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume)
        SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend,
                           stm32_spi_runtime_resume, NULL)
};

static struct platform_driver stm32_spi_driver = {
        .probe = stm32_spi_probe,
        .remove = stm32_spi_remove,
        .driver = {
                .name = DRIVER_NAME,
                .pm = &stm32_spi_pm_ops,
                .of_match_table = stm32_spi_of_match,
        },
};

module_platform_driver(stm32_spi_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
MODULE_LICENSE("GPL v2");