blob: 3230d37fa89a83d2e2091aaa11fe6b1ed484dd0a [file] [log] [blame]
Chris Bootf8043872013-03-11 21:38:24 -06001/*
2 * Driver for Broadcom BCM2835 SPI Controllers
3 *
4 * Copyright (C) 2012 Chris Boot
5 * Copyright (C) 2013 Stephen Warren
Martin Sperle34ff012015-03-26 11:08:36 +01006 * Copyright (C) 2015 Martin Sperl
Chris Bootf8043872013-03-11 21:38:24 -06007 *
8 * This driver is inspired by:
9 * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
10 * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
Chris Bootf8043872013-03-11 21:38:24 -060021 */
22
23#include <linux/clk.h>
24#include <linux/completion.h>
25#include <linux/delay.h>
Martin Sperl3ecd37e2015-05-10 20:47:28 +000026#include <linux/dma-mapping.h>
27#include <linux/dmaengine.h>
Chris Bootf8043872013-03-11 21:38:24 -060028#include <linux/err.h>
29#include <linux/interrupt.h>
30#include <linux/io.h>
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/of.h>
Martin Sperl3ecd37e2015-05-10 20:47:28 +000034#include <linux/of_address.h>
Chris Bootf8043872013-03-11 21:38:24 -060035#include <linux/of_device.h>
Martin Sperl3ecd37e2015-05-10 20:47:28 +000036#include <linux/of_gpio.h>
37#include <linux/of_irq.h>
Chris Bootf8043872013-03-11 21:38:24 -060038#include <linux/spi/spi.h>
39
/* SPI register offsets */
#define BCM2835_SPI_CS			0x00
#define BCM2835_SPI_FIFO		0x04
#define BCM2835_SPI_CLK			0x08
#define BCM2835_SPI_DLEN		0x0c
#define BCM2835_SPI_LTOH		0x10
#define BCM2835_SPI_DC			0x14

/* Bitfields in CS */
#define BCM2835_SPI_CS_LEN_LONG		0x02000000
#define BCM2835_SPI_CS_DMA_LEN		0x01000000
#define BCM2835_SPI_CS_CSPOL2		0x00800000
#define BCM2835_SPI_CS_CSPOL1		0x00400000
#define BCM2835_SPI_CS_CSPOL0		0x00200000
#define BCM2835_SPI_CS_RXF		0x00100000
#define BCM2835_SPI_CS_RXR		0x00080000
#define BCM2835_SPI_CS_TXD		0x00040000
#define BCM2835_SPI_CS_RXD		0x00020000
#define BCM2835_SPI_CS_DONE		0x00010000
#define BCM2835_SPI_CS_LEN		0x00002000
#define BCM2835_SPI_CS_REN		0x00001000
#define BCM2835_SPI_CS_ADCS		0x00000800
#define BCM2835_SPI_CS_INTR		0x00000400
#define BCM2835_SPI_CS_INTD		0x00000200
#define BCM2835_SPI_CS_DMAEN		0x00000100
#define BCM2835_SPI_CS_TA		0x00000080
#define BCM2835_SPI_CS_CSPOL		0x00000040
#define BCM2835_SPI_CS_CLEAR_RX		0x00000020
#define BCM2835_SPI_CS_CLEAR_TX		0x00000010
#define BCM2835_SPI_CS_CPOL		0x00000008
#define BCM2835_SPI_CS_CPHA		0x00000004
#define BCM2835_SPI_CS_CS_10		0x00000002
#define BCM2835_SPI_CS_CS_01		0x00000001

/* FIFO depth in bytes, and the 3/4-full watermark that raises RXR */
#define BCM2835_SPI_FIFO_SIZE		64
#define BCM2835_SPI_FIFO_SIZE_3_4	48
/* transfers shorter than this are not worth the DMA setup overhead */
#define BCM2835_SPI_DMA_MIN_LENGTH	96
#define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
				| SPI_NO_CS | SPI_3WIRE)

#define DRV_NAME	"spi-bcm2835"
81
Martin Sperlff245d92019-04-23 20:15:11 +000082/* define polling limits */
83unsigned int polling_limit_us = 30;
84module_param(polling_limit_us, uint, 0664);
85MODULE_PARM_DESC(polling_limit_us,
86 "time in us to run a transfer in polling mode\n");
87
Lukas Wunneracf0f852018-11-08 08:06:10 +010088/**
89 * struct bcm2835_spi - BCM2835 SPI controller
90 * @regs: base address of register map
91 * @clk: core clock, divided to calculate serial clock
92 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
Lukas Wunner3bd7f652018-11-08 08:06:10 +010093 * @tfr: SPI transfer currently processed
Lukas Wunneracf0f852018-11-08 08:06:10 +010094 * @tx_buf: pointer whence next transmitted byte is read
95 * @rx_buf: pointer where next received byte is written
96 * @tx_len: remaining bytes to transmit
97 * @rx_len: remaining bytes to receive
Lukas Wunner3bd7f652018-11-08 08:06:10 +010098 * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
99 * length is not a multiple of 4 (to overcome hardware limitation)
100 * @rx_prologue: bytes received without DMA if first RX sglist entry's
101 * length is not a multiple of 4 (to overcome hardware limitation)
102 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
Lukas Wunneracf0f852018-11-08 08:06:10 +0100103 * @dma_pending: whether a DMA transfer is in progress
104 */
Chris Bootf8043872013-03-11 21:38:24 -0600105struct bcm2835_spi {
106 void __iomem *regs;
107 struct clk *clk;
108 int irq;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100109 struct spi_transfer *tfr;
Chris Bootf8043872013-03-11 21:38:24 -0600110 const u8 *tx_buf;
111 u8 *rx_buf;
Martin Sperle34ff012015-03-26 11:08:36 +0100112 int tx_len;
113 int rx_len;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100114 int tx_prologue;
115 int rx_prologue;
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100116 unsigned int tx_spillover;
Lukas Wunner29bdedf2018-11-29 15:14:49 +0100117 unsigned int dma_pending;
Chris Bootf8043872013-03-11 21:38:24 -0600118};
119
120static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
121{
122 return readl(bs->regs + reg);
123}
124
125static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
126{
127 writel(val, bs->regs + reg);
128}
129
Martin Sperl4adf3122015-03-23 15:11:53 +0100130static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
Chris Bootf8043872013-03-11 21:38:24 -0600131{
132 u8 byte;
133
Martin Sperle34ff012015-03-26 11:08:36 +0100134 while ((bs->rx_len) &&
135 (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
Chris Bootf8043872013-03-11 21:38:24 -0600136 byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
137 if (bs->rx_buf)
138 *bs->rx_buf++ = byte;
Martin Sperle34ff012015-03-26 11:08:36 +0100139 bs->rx_len--;
Chris Bootf8043872013-03-11 21:38:24 -0600140 }
141}
142
Martin Sperl4adf3122015-03-23 15:11:53 +0100143static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
Chris Bootf8043872013-03-11 21:38:24 -0600144{
145 u8 byte;
146
Martin Sperle34ff012015-03-26 11:08:36 +0100147 while ((bs->tx_len) &&
Martin Sperl4adf3122015-03-23 15:11:53 +0100148 (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
Chris Bootf8043872013-03-11 21:38:24 -0600149 byte = bs->tx_buf ? *bs->tx_buf++ : 0;
150 bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
Martin Sperle34ff012015-03-26 11:08:36 +0100151 bs->tx_len--;
Chris Bootf8043872013-03-11 21:38:24 -0600152 }
153}
154
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100155/**
156 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
157 * @bs: BCM2835 SPI controller
158 * @count: bytes to read from RX FIFO
159 *
160 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
161 * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
162 * in the CS register is set (such that a read from the FIFO register receives
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100163 * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100164 */
165static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
166{
167 u32 val;
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100168 int len;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100169
170 bs->rx_len -= count;
171
172 while (count > 0) {
173 val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100174 len = min(count, 4);
175 memcpy(bs->rx_buf, &val, len);
176 bs->rx_buf += len;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100177 count -= 4;
178 }
179}
180
181/**
182 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
183 * @bs: BCM2835 SPI controller
184 * @count: bytes to write to TX FIFO
185 *
186 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
187 * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
188 * in the CS register is set (such that a write to the FIFO register transmits
189 * 32-bit instead of just 8-bit).
190 */
191static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
192{
193 u32 val;
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100194 int len;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100195
196 bs->tx_len -= count;
197
198 while (count > 0) {
199 if (bs->tx_buf) {
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100200 len = min(count, 4);
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100201 memcpy(&val, bs->tx_buf, len);
202 bs->tx_buf += len;
203 } else {
204 val = 0;
205 }
206 bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
207 count -= 4;
208 }
209}
210
211/**
212 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
213 * @bs: BCM2835 SPI controller
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100214 *
215 * The caller must ensure that the RX FIFO can accommodate as many bytes
216 * as have been written to the TX FIFO: Transmission is halted once the
217 * RX FIFO is full, causing this function to spin forever.
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100218 */
219static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
220{
221 while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
222 cpu_relax();
223}
224
Lukas Wunner2e0733b2018-11-29 16:45:24 +0100225/**
226 * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
227 * @bs: BCM2835 SPI controller
228 * @count: bytes available for reading in RX FIFO
229 */
230static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
231{
232 u8 val;
233
234 count = min(count, bs->rx_len);
235 bs->rx_len -= count;
236
237 while (count) {
238 val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
239 if (bs->rx_buf)
240 *bs->rx_buf++ = val;
241 count--;
242 }
243}
244
245/**
246 * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
247 * @bs: BCM2835 SPI controller
248 * @count: bytes available for writing in TX FIFO
249 */
250static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
251{
252 u8 val;
253
254 count = min(count, bs->tx_len);
255 bs->tx_len -= count;
256
257 while (count) {
258 val = bs->tx_buf ? *bs->tx_buf++ : 0;
259 bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
260 count--;
261 }
262}
263
Martin Sperle34ff012015-03-26 11:08:36 +0100264static void bcm2835_spi_reset_hw(struct spi_master *master)
265{
266 struct bcm2835_spi *bs = spi_master_get_devdata(master);
267 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
268
269 /* Disable SPI interrupts and transfer */
270 cs &= ~(BCM2835_SPI_CS_INTR |
271 BCM2835_SPI_CS_INTD |
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000272 BCM2835_SPI_CS_DMAEN |
Martin Sperle34ff012015-03-26 11:08:36 +0100273 BCM2835_SPI_CS_TA);
274 /* and reset RX/TX FIFOS */
275 cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
276
277 /* and reset the SPI_HW */
278 bcm2835_wr(bs, BCM2835_SPI_CS, cs);
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000279 /* as well as DLEN */
280 bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
Martin Sperle34ff012015-03-26 11:08:36 +0100281}
282
Chris Bootf8043872013-03-11 21:38:24 -0600283static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
284{
285 struct spi_master *master = dev_id;
286 struct bcm2835_spi *bs = spi_master_get_devdata(master);
Lukas Wunner2e0733b2018-11-29 16:45:24 +0100287 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
288
289 /*
290 * An interrupt is signaled either if DONE is set (TX FIFO empty)
291 * or if RXR is set (RX FIFO >= ¾ full).
292 */
293 if (cs & BCM2835_SPI_CS_RXF)
294 bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
295 else if (cs & BCM2835_SPI_CS_RXR)
296 bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);
297
298 if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
299 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
Chris Bootf8043872013-03-11 21:38:24 -0600300
Martin Sperl4adf3122015-03-23 15:11:53 +0100301 /* Read as many bytes as possible from FIFO */
302 bcm2835_rd_fifo(bs);
Martin Sperle34ff012015-03-26 11:08:36 +0100303 /* Write as many bytes as possible to FIFO */
304 bcm2835_wr_fifo(bs);
Chris Bootf8043872013-03-11 21:38:24 -0600305
Lukas Wunner56c17232018-11-08 08:06:10 +0100306 if (!bs->rx_len) {
Martin Sperle34ff012015-03-26 11:08:36 +0100307 /* Transfer complete - reset SPI HW */
308 bcm2835_spi_reset_hw(master);
309 /* wake up the framework */
310 complete(&master->xfer_completion);
Chris Bootf8043872013-03-11 21:38:24 -0600311 }
312
Martin Sperl4adf3122015-03-23 15:11:53 +0100313 return IRQ_HANDLED;
Chris Bootf8043872013-03-11 21:38:24 -0600314}
315
Martin Sperl704f32d2015-04-06 17:16:30 +0000316static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
317 struct spi_device *spi,
318 struct spi_transfer *tfr,
Lukas Wunner2e0733b2018-11-29 16:45:24 +0100319 u32 cs, bool fifo_empty)
Martin Sperl704f32d2015-04-06 17:16:30 +0000320{
321 struct bcm2835_spi *bs = spi_master_get_devdata(master);
Chris Bootf8043872013-03-11 21:38:24 -0600322
Chris Bootf8043872013-03-11 21:38:24 -0600323 /*
Lukas Wunner5c09e422018-11-08 08:06:10 +0100324 * Enable HW block, but with interrupts still disabled.
325 * Otherwise the empty TX FIFO would immediately trigger an interrupt.
Chris Bootf8043872013-03-11 21:38:24 -0600326 */
Lukas Wunner5c09e422018-11-08 08:06:10 +0100327 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
328
329 /* fill TX FIFO as much as possible */
Lukas Wunner2e0733b2018-11-29 16:45:24 +0100330 if (fifo_empty)
331 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
Lukas Wunner5c09e422018-11-08 08:06:10 +0100332 bcm2835_wr_fifo(bs);
333
334 /* enable interrupts */
Martin Sperle34ff012015-03-26 11:08:36 +0100335 cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
Chris Bootf8043872013-03-11 21:38:24 -0600336 bcm2835_wr(bs, BCM2835_SPI_CS, cs);
337
Martin Sperle34ff012015-03-26 11:08:36 +0100338 /* signal that we need to wait for completion */
339 return 1;
Chris Bootf8043872013-03-11 21:38:24 -0600340}
341
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100342/**
343 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
344 * @master: SPI master
345 * @tfr: SPI transfer
346 * @bs: BCM2835 SPI controller
347 * @cs: CS register
348 *
349 * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
350 * Only the final write access is permitted to transmit less than 4 bytes, the
351 * SPI controller deduces its intended size from the DLEN register.
352 *
353 * If a TX or RX sglist contains multiple entries, one per page, and the first
354 * entry starts in the middle of a page, that first entry's length may not be
355 * a multiple of 4. Subsequent entries are fine because they span an entire
356 * page, hence do have a length that's a multiple of 4.
357 *
358 * This cannot happen with kmalloc'ed buffers (which is what most clients use)
359 * because they are contiguous in physical memory and therefore not split on
360 * page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
361 * buffers.
362 *
363 * The DMA engine is incapable of combining sglist entries into a continuous
364 * stream of 4 byte chunks, it treats every entry separately: A TX entry is
365 * rounded up a to a multiple of 4 bytes by transmitting surplus bytes, an RX
366 * entry is rounded up by throwing away received bytes.
367 *
368 * Overcome this limitation by transferring the first few bytes without DMA:
369 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
370 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
371 * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
372 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
373 *
374 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
375 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
376 * Caution, the additional 4 bytes spill over to the second TX sglist entry
377 * if the length of the first is *exactly* 1.
378 *
379 * At most 6 bytes are written and at most 3 bytes read. Do we know the
380 * transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
381 *
382 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
383 * by the DMA engine. Toggling the DMA Enable flag in the CS register switches
384 * the width but also garbles the FIFO's contents. The prologue must therefore
385 * be transmitted in 32-bit width to ensure that the following DMA transfer can
386 * pick up the residue in the RX FIFO in ungarbled form.
387 */
388static void bcm2835_spi_transfer_prologue(struct spi_master *master,
389 struct spi_transfer *tfr,
390 struct bcm2835_spi *bs,
391 u32 cs)
392{
393 int tx_remaining;
394
395 bs->tfr = tfr;
396 bs->tx_prologue = 0;
397 bs->rx_prologue = 0;
398 bs->tx_spillover = false;
399
400 if (!sg_is_last(&tfr->tx_sg.sgl[0]))
401 bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
402
403 if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
404 bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
405
406 if (bs->rx_prologue > bs->tx_prologue) {
407 if (sg_is_last(&tfr->tx_sg.sgl[0])) {
408 bs->tx_prologue = bs->rx_prologue;
409 } else {
410 bs->tx_prologue += 4;
411 bs->tx_spillover =
412 !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
413 }
414 }
415 }
416
417 /* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
418 if (!bs->tx_prologue)
419 return;
420
421 /* Write and read RX prologue. Adjust first entry in RX sglist. */
422 if (bs->rx_prologue) {
423 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
424 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
425 | BCM2835_SPI_CS_DMAEN);
426 bcm2835_wr_fifo_count(bs, bs->rx_prologue);
427 bcm2835_wait_tx_fifo_empty(bs);
428 bcm2835_rd_fifo_count(bs, bs->rx_prologue);
429 bcm2835_spi_reset_hw(master);
430
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100431 dma_sync_single_for_device(master->dma_rx->device->dev,
432 sg_dma_address(&tfr->rx_sg.sgl[0]),
433 bs->rx_prologue, DMA_FROM_DEVICE);
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100434
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100435 sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
436 sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100437 }
438
439 /*
440 * Write remaining TX prologue. Adjust first entry in TX sglist.
441 * Also adjust second entry if prologue spills over to it.
442 */
443 tx_remaining = bs->tx_prologue - bs->rx_prologue;
444 if (tx_remaining) {
445 bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
446 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
447 | BCM2835_SPI_CS_DMAEN);
448 bcm2835_wr_fifo_count(bs, tx_remaining);
449 bcm2835_wait_tx_fifo_empty(bs);
450 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX);
451 }
452
453 if (likely(!bs->tx_spillover)) {
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100454 sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
455 sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100456 } else {
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100457 sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
458 sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
459 sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100460 }
461}
462
463/**
464 * bcm2835_spi_undo_prologue() - reconstruct original sglist state
465 * @bs: BCM2835 SPI controller
466 *
467 * Undo changes which were made to an SPI transfer's sglist when transmitting
468 * the prologue. This is necessary to ensure the same memory ranges are
469 * unmapped that were originally mapped.
470 */
471static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
472{
473 struct spi_transfer *tfr = bs->tfr;
474
475 if (!bs->tx_prologue)
476 return;
477
478 if (bs->rx_prologue) {
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100479 sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
480 sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100481 }
482
483 if (likely(!bs->tx_spillover)) {
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100484 sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
485 sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100486 } else {
Lukas Wunnerb31a9292018-11-29 16:45:24 +0100487 sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
488 sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
489 sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100490 }
491}
492
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000493static void bcm2835_spi_dma_done(void *data)
494{
495 struct spi_master *master = data;
496 struct bcm2835_spi *bs = spi_master_get_devdata(master);
497
498 /* reset fifo and HW */
499 bcm2835_spi_reset_hw(master);
500
501 /* and terminate tx-dma as we do not have an irq for it
502 * because when the rx dma will terminate and this callback
503 * is called the tx-dma must have finished - can't get to this
504 * situation otherwise...
505 */
Lukas Wunnere82b0b32018-11-08 08:06:10 +0100506 if (cmpxchg(&bs->dma_pending, true, false)) {
Lukas Wunner25277042018-11-29 16:45:24 +0100507 dmaengine_terminate_async(master->dma_tx);
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100508 bcm2835_spi_undo_prologue(bs);
Lukas Wunnere82b0b32018-11-08 08:06:10 +0100509 }
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000510
511 /* and mark as completed */;
512 complete(&master->xfer_completion);
513}
514
515static int bcm2835_spi_prepare_sg(struct spi_master *master,
516 struct spi_transfer *tfr,
517 bool is_tx)
518{
519 struct dma_chan *chan;
520 struct scatterlist *sgl;
521 unsigned int nents;
522 enum dma_transfer_direction dir;
523 unsigned long flags;
524
525 struct dma_async_tx_descriptor *desc;
526 dma_cookie_t cookie;
527
528 if (is_tx) {
529 dir = DMA_MEM_TO_DEV;
530 chan = master->dma_tx;
531 nents = tfr->tx_sg.nents;
532 sgl = tfr->tx_sg.sgl;
533 flags = 0 /* no tx interrupt */;
534
535 } else {
536 dir = DMA_DEV_TO_MEM;
537 chan = master->dma_rx;
538 nents = tfr->rx_sg.nents;
539 sgl = tfr->rx_sg.sgl;
540 flags = DMA_PREP_INTERRUPT;
541 }
542 /* prepare the channel */
543 desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
544 if (!desc)
545 return -EINVAL;
546
547 /* set callback for rx */
548 if (!is_tx) {
549 desc->callback = bcm2835_spi_dma_done;
550 desc->callback_param = master;
551 }
552
553 /* submit it to DMA-engine */
554 cookie = dmaengine_submit(desc);
555
556 return dma_submit_error(cookie);
557}
558
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000559static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
560 struct spi_device *spi,
561 struct spi_transfer *tfr,
562 u32 cs)
563{
564 struct bcm2835_spi *bs = spi_master_get_devdata(master);
565 int ret;
566
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100567 /*
568 * Transfer first few bytes without DMA if length of first TX or RX
569 * sglist entry is not a multiple of 4 bytes (hardware limitation).
570 */
571 bcm2835_spi_transfer_prologue(master, tfr, bs, cs);
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000572
573 /* setup tx-DMA */
574 ret = bcm2835_spi_prepare_sg(master, tfr, true);
575 if (ret)
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100576 goto err_reset_hw;
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000577
578 /* start TX early */
579 dma_async_issue_pending(master->dma_tx);
580
581 /* mark as dma pending */
582 bs->dma_pending = 1;
583
584 /* set the DMA length */
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100585 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000586
587 /* start the HW */
588 bcm2835_wr(bs, BCM2835_SPI_CS,
589 cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
590
591 /* setup rx-DMA late - to run transfers while
592 * mapping of the rx buffers still takes place
593 * this saves 10us or more.
594 */
595 ret = bcm2835_spi_prepare_sg(master, tfr, false);
596 if (ret) {
597 /* need to reset on errors */
Lukas Wunner25277042018-11-29 16:45:24 +0100598 dmaengine_terminate_sync(master->dma_tx);
Lukas Wunnerdbc94412018-11-08 08:06:10 +0100599 bs->dma_pending = false;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100600 goto err_reset_hw;
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000601 }
602
603 /* start rx dma late */
604 dma_async_issue_pending(master->dma_rx);
605
606 /* wait for wakeup in framework */
607 return 1;
Lukas Wunner3bd7f652018-11-08 08:06:10 +0100608
609err_reset_hw:
610 bcm2835_spi_reset_hw(master);
611 bcm2835_spi_undo_prologue(bs);
612 return ret;
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000613}
614
615static bool bcm2835_spi_can_dma(struct spi_master *master,
616 struct spi_device *spi,
617 struct spi_transfer *tfr)
618{
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000619 /* we start DMA efforts only on bigger transfers */
620 if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
621 return false;
622
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000623 /* return OK */
624 return true;
625}
626
kbuild test robot29ad1a72015-05-12 19:43:59 +0800627static void bcm2835_dma_release(struct spi_master *master)
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000628{
629 if (master->dma_tx) {
Lukas Wunner25277042018-11-29 16:45:24 +0100630 dmaengine_terminate_sync(master->dma_tx);
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000631 dma_release_channel(master->dma_tx);
632 master->dma_tx = NULL;
633 }
634 if (master->dma_rx) {
Lukas Wunner25277042018-11-29 16:45:24 +0100635 dmaengine_terminate_sync(master->dma_rx);
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000636 dma_release_channel(master->dma_rx);
637 master->dma_rx = NULL;
638 }
639}
640
kbuild test robot29ad1a72015-05-12 19:43:59 +0800641static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000642{
643 struct dma_slave_config slave_config;
644 const __be32 *addr;
645 dma_addr_t dma_reg_base;
646 int ret;
647
648 /* base address in dma-space */
649 addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
650 if (!addr) {
651 dev_err(dev, "could not get DMA-register address - not using dma mode\n");
652 goto err;
653 }
654 dma_reg_base = be32_to_cpup(addr);
655
656 /* get tx/rx dma */
657 master->dma_tx = dma_request_slave_channel(dev, "tx");
658 if (!master->dma_tx) {
659 dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
660 goto err;
661 }
662 master->dma_rx = dma_request_slave_channel(dev, "rx");
663 if (!master->dma_rx) {
664 dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
665 goto err_release;
666 }
667
668 /* configure DMAs */
669 slave_config.direction = DMA_MEM_TO_DEV;
670 slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
671 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
672
673 ret = dmaengine_slave_config(master->dma_tx, &slave_config);
674 if (ret)
675 goto err_config;
676
677 slave_config.direction = DMA_DEV_TO_MEM;
678 slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
679 slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
680
681 ret = dmaengine_slave_config(master->dma_rx, &slave_config);
682 if (ret)
683 goto err_config;
684
685 /* all went well, so set can_dma */
686 master->can_dma = bcm2835_spi_can_dma;
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000687 /* need to do TX AND RX DMA, so we need dummy buffers */
688 master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
689
690 return;
691
692err_config:
693 dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
694 ret);
695err_release:
696 bcm2835_dma_release(master);
697err:
698 return;
699}
700
Martin Sperla750b122015-04-22 07:33:03 +0000701static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
702 struct spi_device *spi,
703 struct spi_transfer *tfr,
Martin Sperl9ac3f902019-04-23 20:15:08 +0000704 u32 cs)
Martin Sperla750b122015-04-22 07:33:03 +0000705{
706 struct bcm2835_spi *bs = spi_master_get_devdata(master);
707 unsigned long timeout;
708
709 /* enable HW block without interrupts */
710 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
711
712 /* fill in the fifo before timeout calculations
713 * if we are interrupted here, then the data is
714 * getting transferred by the HW while we are interrupted
715 */
Lukas Wunner2e0733b2018-11-29 16:45:24 +0100716 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
Martin Sperla750b122015-04-22 07:33:03 +0000717
Martin Sperlff245d92019-04-23 20:15:11 +0000718 /* set the timeout to at least 2 jiffies */
719 timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
Martin Sperla750b122015-04-22 07:33:03 +0000720
721 /* loop until finished the transfer */
722 while (bs->rx_len) {
723 /* fill in tx fifo with remaining data */
724 bcm2835_wr_fifo(bs);
725
726 /* read from fifo as much as possible */
727 bcm2835_rd_fifo(bs);
728
729 /* if there is still data pending to read
730 * then check the timeout
731 */
732 if (bs->rx_len && time_after(jiffies, timeout)) {
733 dev_dbg_ratelimited(&spi->dev,
734 "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
735 jiffies - timeout,
736 bs->tx_len, bs->rx_len);
737 /* fall back to interrupt mode */
738 return bcm2835_spi_transfer_one_irq(master, spi,
Lukas Wunner2e0733b2018-11-29 16:45:24 +0100739 tfr, cs, false);
Martin Sperla750b122015-04-22 07:33:03 +0000740 }
741 }
742
743 /* Transfer complete - reset SPI HW */
744 bcm2835_spi_reset_hw(master);
745 /* and return without waiting for completion */
746 return 0;
747}
748
Martin Sperl704f32d2015-04-06 17:16:30 +0000749static int bcm2835_spi_transfer_one(struct spi_master *master,
750 struct spi_device *spi,
751 struct spi_transfer *tfr)
752{
753 struct bcm2835_spi *bs = spi_master_get_devdata(master);
Martin Sperlff245d92019-04-23 20:15:11 +0000754 unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
755 unsigned long hz_per_byte, byte_limit;
Martin Sperl704f32d2015-04-06 17:16:30 +0000756 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
757
758 /* set clock */
759 spi_hz = tfr->speed_hz;
760 clk_hz = clk_get_rate(bs->clk);
761
762 if (spi_hz >= clk_hz / 2) {
763 cdiv = 2; /* clk_hz/2 is the fastest we can go */
764 } else if (spi_hz) {
765 /* CDIV must be a multiple of two */
766 cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
767 cdiv += (cdiv % 2);
768
769 if (cdiv >= 65536)
770 cdiv = 0; /* 0 is the slowest we can go */
771 } else {
772 cdiv = 0; /* 0 is the slowest we can go */
773 }
774 spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
775 bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
776
Martin Sperlacace732015-07-28 14:03:12 +0000777 /* handle all the 3-wire mode */
Martin Sperl704f32d2015-04-06 17:16:30 +0000778 if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
779 cs |= BCM2835_SPI_CS_REN;
Martin Sperlacace732015-07-28 14:03:12 +0000780 else
781 cs &= ~BCM2835_SPI_CS_REN;
Martin Sperl704f32d2015-04-06 17:16:30 +0000782
Lukas Wunner5c09e422018-11-08 08:06:10 +0100783 /*
784 * The driver always uses software-controlled GPIO Chip Select.
785 * Set the hardware-controlled native Chip Select to an invalid
786 * value to prevent it from interfering.
Martin Sperl704f32d2015-04-06 17:16:30 +0000787 */
Lukas Wunner5c09e422018-11-08 08:06:10 +0100788 cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
Martin Sperl704f32d2015-04-06 17:16:30 +0000789
790 /* set transmit buffers and length */
791 bs->tx_buf = tfr->tx_buf;
792 bs->rx_buf = tfr->rx_buf;
793 bs->tx_len = tfr->len;
794 bs->rx_len = tfr->len;
795
	/* Calculate the estimated time in us the transfer runs. Note that
	 * there is 1 idle clock cycle after each byte getting transferred
	 * so we have 9 cycles/byte. This is used to find the number of Hz
	 * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
	 * per 300,000 Hz of bus clock.
	 */
Martin Sperlff245d92019-04-23 20:15:11 +0000802 hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
803 byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
804
Martin Sperl7f1922e2019-04-23 20:15:09 +0000805 /* run in polling mode for short transfers */
Martin Sperlff245d92019-04-23 20:15:11 +0000806 if (tfr->len < byte_limit)
Martin Sperl9ac3f902019-04-23 20:15:08 +0000807 return bcm2835_spi_transfer_one_poll(master, spi, tfr, cs);
Martin Sperl704f32d2015-04-06 17:16:30 +0000808
Martin Sperlc41d62b2019-04-23 20:15:10 +0000809 /* run in dma mode if conditions are right
810 * Note that unlike poll or interrupt mode DMA mode does not have
811 * this 1 idle clock cycle pattern but runs the spi clock without gaps
812 */
Martin Sperl3ecd37e2015-05-10 20:47:28 +0000813 if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
814 return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
815
816 /* run in interrupt-mode */
Lukas Wunner2e0733b2018-11-29 16:45:24 +0100817 return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs, true);
Martin Sperl704f32d2015-04-06 17:16:30 +0000818}
819
Martin Sperlacace732015-07-28 14:03:12 +0000820static int bcm2835_spi_prepare_message(struct spi_master *master,
821 struct spi_message *msg)
822{
823 struct spi_device *spi = msg->spi;
824 struct bcm2835_spi *bs = spi_master_get_devdata(master);
825 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
Meghana Madhyastha8b7bd102019-04-13 20:24:14 +0200826 int ret;
827
828 /*
829 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW
830 * due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is
831 * exceeded.
832 */
833 ret = spi_split_transfers_maxsize(master, msg, 65532,
834 GFP_KERNEL | GFP_DMA);
835 if (ret)
836 return ret;
Martin Sperlacace732015-07-28 14:03:12 +0000837
838 cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
839
840 if (spi->mode & SPI_CPOL)
841 cs |= BCM2835_SPI_CS_CPOL;
842 if (spi->mode & SPI_CPHA)
843 cs |= BCM2835_SPI_CS_CPHA;
844
845 bcm2835_wr(bs, BCM2835_SPI_CS, cs);
846
847 return 0;
848}
849
/*
 * Error/abort callback invoked by the SPI core (e.g. on transfer timeout).
 * Tears down any in-flight DMA and resets the controller to a clean state.
 */
static void bcm2835_spi_handle_err(struct spi_master *master,
				   struct spi_message *msg)
{
	struct bcm2835_spi *bs = spi_master_get_devdata(master);

	/* if an error occurred and we have an active dma, then terminate */
	/*
	 * cmpxchg atomically claims the pending flag, so this cannot race
	 * with the DMA completion path clearing the same flag: only one
	 * side ever wins and performs the teardown.
	 */
	if (cmpxchg(&bs->dma_pending, true, false)) {
		dmaengine_terminate_sync(master->dma_tx);
		dmaengine_terminate_sync(master->dma_rx);
		/* undo any TX-prologue byte shuffling done for DMA alignment */
		bcm2835_spi_undo_prologue(bs);
	}
	/* and reset */
	bcm2835_spi_reset_hw(master);
}
864
Martin Sperla30a5552015-04-06 17:16:31 +0000865static int chip_match_name(struct gpio_chip *chip, void *data)
866{
867 return !strcmp(chip->label, data);
868}
869
Martin Sperle34ff012015-03-26 11:08:36 +0100870static int bcm2835_spi_setup(struct spi_device *spi)
871{
Martin Sperla30a5552015-04-06 17:16:31 +0000872 int err;
873 struct gpio_chip *chip;
Martin Sperle34ff012015-03-26 11:08:36 +0100874 /*
875 * sanity checking the native-chipselects
876 */
877 if (spi->mode & SPI_NO_CS)
878 return 0;
879 if (gpio_is_valid(spi->cs_gpio))
880 return 0;
Martin Sperla30a5552015-04-06 17:16:31 +0000881 if (spi->chip_select > 1) {
882 /* error in the case of native CS requested with CS > 1
883 * officially there is a CS2, but it is not documented
884 * which GPIO is connected with that...
885 */
886 dev_err(&spi->dev,
887 "setup: only two native chip-selects are supported\n");
888 return -EINVAL;
889 }
890 /* now translate native cs to GPIO */
891
892 /* get the gpio chip for the base */
893 chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
894 if (!chip)
Martin Sperle34ff012015-03-26 11:08:36 +0100895 return 0;
896
Martin Sperla30a5552015-04-06 17:16:31 +0000897 /* and calculate the real CS */
898 spi->cs_gpio = chip->base + 8 - spi->chip_select;
899
900 /* and set up the "mode" and level */
901 dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
902 spi->chip_select, spi->cs_gpio);
903
904 /* set up GPIO as output and pull to the correct level */
905 err = gpio_direction_output(spi->cs_gpio,
906 (spi->mode & SPI_CS_HIGH) ? 0 : 1);
907 if (err) {
908 dev_err(&spi->dev,
909 "could not set CS%i gpio %i as output: %i",
910 spi->chip_select, spi->cs_gpio, err);
911 return err;
912 }
Martin Sperla30a5552015-04-06 17:16:31 +0000913
914 return 0;
Chris Bootf8043872013-03-11 21:38:24 -0600915}
916
917static int bcm2835_spi_probe(struct platform_device *pdev)
918{
919 struct spi_master *master;
920 struct bcm2835_spi *bs;
921 struct resource *res;
922 int err;
923
924 master = spi_alloc_master(&pdev->dev, sizeof(*bs));
925 if (!master) {
926 dev_err(&pdev->dev, "spi_alloc_master() failed\n");
927 return -ENOMEM;
928 }
929
930 platform_set_drvdata(pdev, master);
931
932 master->mode_bits = BCM2835_SPI_MODE_BITS;
Axel Linc2b6a3a2013-08-05 08:43:02 +0800933 master->bits_per_word_mask = SPI_BPW_MASK(8);
Chris Bootf8043872013-03-11 21:38:24 -0600934 master->num_chipselect = 3;
Martin Sperle34ff012015-03-26 11:08:36 +0100935 master->setup = bcm2835_spi_setup;
Martin Sperle34ff012015-03-26 11:08:36 +0100936 master->transfer_one = bcm2835_spi_transfer_one;
937 master->handle_err = bcm2835_spi_handle_err;
Martin Sperlacace732015-07-28 14:03:12 +0000938 master->prepare_message = bcm2835_spi_prepare_message;
Chris Bootf8043872013-03-11 21:38:24 -0600939 master->dev.of_node = pdev->dev.of_node;
940
941 bs = spi_master_get_devdata(master);
942
Chris Bootf8043872013-03-11 21:38:24 -0600943 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Laurent Navet2d6e75e2013-05-02 14:13:30 +0200944 bs->regs = devm_ioremap_resource(&pdev->dev, res);
945 if (IS_ERR(bs->regs)) {
946 err = PTR_ERR(bs->regs);
Chris Bootf8043872013-03-11 21:38:24 -0600947 goto out_master_put;
948 }
949
950 bs->clk = devm_clk_get(&pdev->dev, NULL);
951 if (IS_ERR(bs->clk)) {
952 err = PTR_ERR(bs->clk);
953 dev_err(&pdev->dev, "could not get clk: %d\n", err);
954 goto out_master_put;
955 }
956
Martin Sperlddf0e1c2015-10-15 10:09:11 +0000957 bs->irq = platform_get_irq(pdev, 0);
Chris Bootf8043872013-03-11 21:38:24 -0600958 if (bs->irq <= 0) {
959 dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
960 err = bs->irq ? bs->irq : -ENODEV;
961 goto out_master_put;
962 }
963
964 clk_prepare_enable(bs->clk);
965
Martin Sperlddf0e1c2015-10-15 10:09:11 +0000966 bcm2835_dma_init(master, &pdev->dev);
967
968 /* initialise the hardware with the default polarities */
969 bcm2835_wr(bs, BCM2835_SPI_CS,
970 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
971
Jingoo Han08bc0542013-12-09 19:25:00 +0900972 err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
Martin Sperl342f9482015-03-20 15:26:11 +0100973 dev_name(&pdev->dev), master);
Chris Bootf8043872013-03-11 21:38:24 -0600974 if (err) {
975 dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
976 goto out_clk_disable;
977 }
978
Jingoo Han247263d2013-09-24 13:23:00 +0900979 err = devm_spi_register_master(&pdev->dev, master);
Chris Bootf8043872013-03-11 21:38:24 -0600980 if (err) {
981 dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
Jingoo Han08bc0542013-12-09 19:25:00 +0900982 goto out_clk_disable;
Chris Bootf8043872013-03-11 21:38:24 -0600983 }
984
985 return 0;
986
Chris Bootf8043872013-03-11 21:38:24 -0600987out_clk_disable:
988 clk_disable_unprepare(bs->clk);
989out_master_put:
990 spi_master_put(master);
991 return err;
992}
993
994static int bcm2835_spi_remove(struct platform_device *pdev)
995{
Wei Yongjune0b35b82013-11-15 15:43:27 +0800996 struct spi_master *master = platform_get_drvdata(pdev);
Chris Bootf8043872013-03-11 21:38:24 -0600997 struct bcm2835_spi *bs = spi_master_get_devdata(master);
998
Chris Bootf8043872013-03-11 21:38:24 -0600999 /* Clear FIFOs, and disable the HW block */
1000 bcm2835_wr(bs, BCM2835_SPI_CS,
1001 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
1002
1003 clk_disable_unprepare(bs->clk);
Chris Bootf8043872013-03-11 21:38:24 -06001004
Martin Sperl3ecd37e2015-05-10 20:47:28 +00001005 bcm2835_dma_release(master);
1006
Chris Bootf8043872013-03-11 21:38:24 -06001007 return 0;
1008}
1009
/* Devicetree match table: binds this driver to "brcm,bcm2835-spi" nodes. */
static const struct of_device_id bcm2835_spi_match[] = {
	{ .compatible = "brcm,bcm2835-spi", },
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
1015
/* Platform-driver glue; registration handled by module_platform_driver(). */
static struct platform_driver bcm2835_spi_driver = {
	.driver		= {
		.name		= DRV_NAME,
		.of_match_table	= bcm2835_spi_match,
	},
	.probe		= bcm2835_spi_probe,
	.remove		= bcm2835_spi_remove,
};
module_platform_driver(bcm2835_spi_driver);
1025
1026MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
1027MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
Stefan Wahren22bf6cd2018-10-23 13:06:08 +02001028MODULE_LICENSE("GPL");