/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <mach/spi.h>
#include <mach/edma.h>

#define SPI_NO_RESOURCE         ((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT      2

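/*
 * CS_DEFAULT is written to SPIDEF and used as the idle value of the SPIDAT1
 * chip-select field: every chip-select line is driven to its inactive
 * (high) default level.
 */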
#define CS_DEFAULT              0xFF

#define SPIFMT_PHASE_MASK       BIT(16)
#define SPIFMT_POLARITY_MASK    BIT(17)
#define SPIFMT_DISTIMER_MASK    BIT(18)
#define SPIFMT_SHIFTDIR_MASK    BIT(20)
#define SPIFMT_WAITENA_MASK     BIT(21)
#define SPIFMT_PARITYENA_MASK   BIT(22)
#define SPIFMT_ODD_PARITY_MASK  BIT(23)
#define SPIFMT_WDELAY_MASK      0x3f000000u
#define SPIFMT_WDELAY_SHIFT     24
#define SPIFMT_PRESCALE_SHIFT   8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK       BIT(11)         /* MISO */
#define SPIPC0_DOFUN_MASK       BIT(10)         /* MOSI */
#define SPIPC0_CLKFUN_MASK      BIT(9)          /* CLK */
#define SPIPC0_SPIENA_MASK      BIT(8)          /* nREADY */

#define SPIINT_MASKALL          0x0101035F
#define SPIINT_MASKINT          0x0000015F
#define SPI_INTLVL_1            0x000001FF
#define SPI_INTLVL_0            0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK     BIT(12)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK     BIT(1)
#define SPIGCR1_MASTER_MASK     BIT(0)
#define SPIGCR1_POWERDOWN_MASK  BIT(8)
#define SPIGCR1_LOOPBACK_MASK   BIT(16)
#define SPIGCR1_SPIENA_MASK     BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK      BIT(29)
#define SPIBUF_RXEMPTY_MASK     BIT(31)

/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT 24
#define SPIDELAY_C2TDELAY_MASK  (0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT 16
#define SPIDELAY_T2CDELAY_MASK  (0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT 8
#define SPIDELAY_T2EDELAY_MASK  (0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT 0
#define SPIDELAY_C2EDELAY_MASK  0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK            BIT(0)
#define SPIFLG_TIMEOUT_MASK             BIT(1)
#define SPIFLG_PARERR_MASK              BIT(2)
#define SPIFLG_DESYNC_MASK              BIT(3)
#define SPIFLG_BITERR_MASK              BIT(4)
#define SPIFLG_OVRRUN_MASK              BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK     BIT(24)
#define SPIFLG_ERROR_MASK               (SPIFLG_DLEN_ERR_MASK \
                                | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
                                | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
                                | SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN       BIT(16)

/* SPI Controller registers */
#define SPIGCR0         0x00
#define SPIGCR1         0x04
#define SPIINT          0x08
#define SPILVL          0x0c
#define SPIFLG          0x10
#define SPIPC0          0x14
#define SPIDAT1         0x3c
#define SPIBUF          0x40
#define SPIDELAY        0x48
#define SPIDEF          0x4c
#define SPIFMT0         0x50

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
        int                     dma_tx_channel;
        int                     dma_rx_channel;
        int                     dummy_param_slot;
        enum dma_event_q        eventq;
};

/* SPI Controller driver's private data. */
struct davinci_spi {
        struct spi_bitbang      bitbang;
        struct clk              *clk;

        u8                      version;
        resource_size_t         pbase;
        void __iomem            *base;
        size_t                  region_size;
        u32                     irq;
        struct completion       done;

        const void              *tx;
        void                    *rx;
#define SPI_TMP_BUFSZ   (SMP_CACHE_BYTES + 1)
        u8                      rx_tmp_buf[SPI_TMP_BUFSZ];
        int                     rcount;
        int                     wcount;
        struct davinci_spi_dma  dma_channels;
        struct davinci_spi_platform_data *pdata;

        void                    (*get_rx)(u32 rx_data, struct davinci_spi *);
        u32                     (*get_tx)(struct davinci_spi *);

        u8                      bytes_per_word[SPI_MAX_CHIPSELECT];
};

static struct davinci_spi_config davinci_spi_default_cfg;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
{
        if (davinci_spi->rx) {
                u8 *rx = davinci_spi->rx;
                *rx++ = (u8)data;
                davinci_spi->rx = rx;
        }
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
{
        if (davinci_spi->rx) {
                u16 *rx = davinci_spi->rx;
                *rx++ = (u16)data;
                davinci_spi->rx = rx;
        }
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
{
        u32 data = 0;
        if (davinci_spi->tx) {
                const u8 *tx = davinci_spi->tx;
                data = *tx++;
                davinci_spi->tx = tx;
        }
        return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
{
        u32 data = 0;
        if (davinci_spi->tx) {
                const u16 *tx = davinci_spi->tx;
                data = *tx++;
                davinci_spi->tx = tx;
        }
        return data;
}

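/*
 * Read-modify-write helpers for setting and clearing bits in the
 * memory-mapped SPI registers.
 */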
static inline void set_io_bits(void __iomem *addr, u32 bits)
{
        u32 v = ioread32(addr);

        v |= bits;
        iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
        u32 v = ioread32(addr);

        v &= ~bits;
        iowrite32(v, addr);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;
        u8 chip_sel = spi->chip_select;
        u16 spidat1_cfg = CS_DEFAULT;
        bool gpio_chipsel = false;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;

        if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
                        pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
                gpio_chipsel = true;

        /*
         * Board specific chip select logic decides the polarity and cs
         * line for the controller
         */
        if (gpio_chipsel) {
                if (value == BITBANG_CS_ACTIVE)
                        gpio_set_value(pdata->chip_sel[chip_sel], 0);
                else
                        gpio_set_value(pdata->chip_sel[chip_sel], 1);
        } else {
                if (value == BITBANG_CS_ACTIVE) {
                        spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
                        spidat1_cfg &= ~(0x1 << chip_sel);
                }

                iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
        }
}

/**
 * davinci_spi_get_prescale - calculate the correct prescale value
 * @davinci_spi: the controller data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale - 1 for easy programming into SPI registers,
 * or a negative error number if a valid prescale value cannot be obtained.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *davinci_spi,
                                                        u32 max_speed_hz)
{
        int ret;

        ret = DIV_ROUND_UP(clk_get_rate(davinci_spi->clk), max_speed_hz);

        if (ret < 3 || ret > 256)
                return -EINVAL;

        return ret - 1;
}

/**
 * davinci_spi_setup_transfer - determine the transfer method for a message
 * @spi: spi device on which the data transfer is to be done
 * @t: spi transfer in which the transfer info is filled
 *
 * This function determines the data transfer method (8-bit or 16-bit
 * transfer) and sets up the SPIFMTn register (prescaler, clock mode and
 * delays) according to the SPI slave device's frequency and mode.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
                struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        struct davinci_spi_config *spicfg;
        u8 bits_per_word = 0;
        u32 hz = 0, spifmt = 0;
        int prescale;

        davinci_spi = spi_master_get_devdata(spi->master);
        spicfg = (struct davinci_spi_config *)spi->controller_data;
        if (!spicfg)
                spicfg = &davinci_spi_default_cfg;

        if (t) {
                bits_per_word = t->bits_per_word;
                hz = t->speed_hz;
        }

        /* if bits_per_word is not set then use the device default */
        if (!bits_per_word)
                bits_per_word = spi->bits_per_word;

        /*
         * Assign the function pointer for the appropriate transfer method:
         * 8-bit or 16-bit transfer.
         */
        if (bits_per_word <= 8 && bits_per_word >= 2) {
                davinci_spi->get_rx = davinci_spi_rx_buf_u8;
                davinci_spi->get_tx = davinci_spi_tx_buf_u8;
                davinci_spi->bytes_per_word[spi->chip_select] = 1;
        } else if (bits_per_word <= 16 && bits_per_word >= 2) {
                davinci_spi->get_rx = davinci_spi_rx_buf_u16;
                davinci_spi->get_tx = davinci_spi_tx_buf_u16;
                davinci_spi->bytes_per_word[spi->chip_select] = 2;
        } else
                return -EINVAL;

        if (!hz)
                hz = spi->max_speed_hz;

        /* Set up SPIFMTn register, unique to this chipselect. */

        prescale = davinci_spi_get_prescale(davinci_spi, hz);
        if (prescale < 0)
                return prescale;

        spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

        if (spi->mode & SPI_LSB_FIRST)
                spifmt |= SPIFMT_SHIFTDIR_MASK;

        if (spi->mode & SPI_CPOL)
                spifmt |= SPIFMT_POLARITY_MASK;

        if (!(spi->mode & SPI_CPHA))
                spifmt |= SPIFMT_PHASE_MASK;

        /*
         * Version 1 hardware supports two basic SPI modes:
         *  - Standard SPI mode uses 4 pins, with chipselect
         *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
         *      (distinct from SPI_3WIRE, with just one data wire;
         *      or similar variants without MOSI or without MISO)
         *
         * Version 2 hardware supports an optional handshaking signal,
         * so it can support two more modes:
         *  - 5 pin SPI variant is standard SPI plus SPI_READY
         *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
         */
        if (davinci_spi->version == SPI_VERSION_2) {

                u32 delay = 0;

                spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
                                                & SPIFMT_WDELAY_MASK);

                if (spicfg->odd_parity)
                        spifmt |= SPIFMT_ODD_PARITY_MASK;

                if (spicfg->parity_enable)
                        spifmt |= SPIFMT_PARITYENA_MASK;

                if (spicfg->timer_disable) {
                        spifmt |= SPIFMT_DISTIMER_MASK;
                } else {
                        delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
                                                & SPIDELAY_C2TDELAY_MASK;
                        delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
                                                & SPIDELAY_T2CDELAY_MASK;
                }

                if (spi->mode & SPI_READY) {
                        spifmt |= SPIFMT_WAITENA_MASK;
                        delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
                                                & SPIDELAY_T2EDELAY_MASK;
                        delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
                                                & SPIDELAY_C2EDELAY_MASK;
                }

                iowrite32(delay, davinci_spi->base + SPIDELAY);
        }

        iowrite32(spifmt, davinci_spi->base + SPIFMT0);

        return 0;
}

/**
 * davinci_spi_setup - set up the SPI device
 * @spi: spi device for which the setup is to be done
 *
 * This function sets the default transfer parameters and configures the
 * chip-select and enable pin functions for the device.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
        int retval = 0;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;

        /* if bits per word length is zero then set the default of 8 */
        if (!spi->bits_per_word)
                spi->bits_per_word = 8;

        if (!(spi->mode & SPI_NO_CS)) {
                if ((pdata->chip_sel == NULL) ||
                    (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
                        set_io_bits(davinci_spi->base + SPIPC0,
                                        1 << spi->chip_select);
        }

        if (spi->mode & SPI_READY)
                set_io_bits(davinci_spi->base + SPIPC0, SPIPC0_SPIENA_MASK);

        if (spi->mode & SPI_LOOP)
                set_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_LOOPBACK_MASK);
        else
                clear_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_LOOPBACK_MASK);

        return retval;
}

static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
                                                int int_status)
{
        struct device *sdev = davinci_spi->bitbang.master->dev.parent;

        if (int_status & SPIFLG_TIMEOUT_MASK) {
                dev_dbg(sdev, "SPI Time-out Error\n");
                return -ETIMEDOUT;
        }
        if (int_status & SPIFLG_DESYNC_MASK) {
                dev_dbg(sdev, "SPI Desynchronization Error\n");
                return -EIO;
        }
        if (int_status & SPIFLG_BITERR_MASK) {
                dev_dbg(sdev, "SPI Bit error\n");
                return -EIO;
        }

        if (davinci_spi->version == SPI_VERSION_2) {
                if (int_status & SPIFLG_DLEN_ERR_MASK) {
                        dev_dbg(sdev, "SPI Data Length Error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_PARERR_MASK) {
                        dev_dbg(sdev, "SPI Parity Error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_OVRRUN_MASK) {
                        dev_dbg(sdev, "SPI Data Overrun error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
                        dev_dbg(sdev, "SPI Buffer Init Active\n");
                        return -EBUSY;
                }
        }

        return 0;
}

/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @davinci_spi: the controller data
 *
 * This function checks the SPIFLG register and handles any events that are
 * detected there. It returns the SPIFLG error bits if an error occurred,
 * otherwise 0.
 */
static int davinci_spi_process_events(struct davinci_spi *davinci_spi)
{
        u32 buf, status, errors = 0, data1_reg_val;

        buf = ioread32(davinci_spi->base + SPIBUF);

        if (davinci_spi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
                davinci_spi->get_rx(buf & 0xFFFF, davinci_spi);
                davinci_spi->rcount--;
        }

        status = ioread32(davinci_spi->base + SPIFLG);

        if (unlikely(status & SPIFLG_ERROR_MASK)) {
                errors = status & SPIFLG_ERROR_MASK;
                goto out;
        }

        if (davinci_spi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
                data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
                davinci_spi->wcount--;
                data1_reg_val &= ~0xFFFF;
                data1_reg_val |= 0xFFFF & davinci_spi->get_tx(davinci_spi);
                iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
        }

out:
        return errors;
}

static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
{
        struct davinci_spi *davinci_spi = data;
        struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;

        edma_stop(lch);

        if (status == DMA_COMPLETE) {
                if (lch == davinci_spi_dma->dma_rx_channel)
                        davinci_spi->rcount = 0;
                if (lch == davinci_spi_dma->dma_tx_channel)
                        davinci_spi->wcount = 0;
        }

        if ((!davinci_spi->wcount && !davinci_spi->rcount) ||
                        (status != DMA_COMPLETE))
                complete(&davinci_spi->done);
}

/**
 * davinci_spi_bufs - handle the data transfer for one spi_transfer
 * @spi: spi device on which the data transfer is to be done
 * @t: spi transfer in which the transfer info is filled
 *
 * This function starts the transfer (by polled I/O, interrupt or DMA,
 * depending on the configured I/O type) and then waits until the transfer
 * completion is signalled.
 */
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        int data_type, ret;
        u32 tx_data, data1_reg_val;
        u32 errors = 0;
        struct davinci_spi_config *spicfg;
        struct davinci_spi_platform_data *pdata;
        unsigned uninitialized_var(rx_buf_count);
        struct device *sdev;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;
        spicfg = (struct davinci_spi_config *)spi->controller_data;
        if (!spicfg)
                spicfg = &davinci_spi_default_cfg;
        sdev = davinci_spi->bitbang.master->dev.parent;

        /* convert len to words based on bits_per_word */
        data_type = davinci_spi->bytes_per_word[spi->chip_select];

        davinci_spi->tx = t->tx_buf;
        davinci_spi->rx = t->rx_buf;
        davinci_spi->wcount = t->len / data_type;
        davinci_spi->rcount = davinci_spi->wcount;

        data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

        clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

        INIT_COMPLETION(davinci_spi->done);

        if (spicfg->io_type == SPI_IO_TYPE_INTR)
                set_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKINT);

        if (spicfg->io_type != SPI_IO_TYPE_DMA) {
                /* start the transfer */
                davinci_spi->wcount--;
                tx_data = davinci_spi->get_tx(davinci_spi);
                data1_reg_val &= 0xFFFF0000;
                data1_reg_val |= tx_data & 0xFFFF;
                iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
        } else {
                struct davinci_spi_dma *davinci_spi_dma;
                unsigned long tx_reg, rx_reg;
                struct edmacc_param param;
                void *rx_buf;

                davinci_spi_dma = &davinci_spi->dma_channels;

                tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
                rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

                /*
                 * Transmit DMA setup
                 *
                 * If there is transmit data, map the transmit buffer, set it
                 * as the source of data and set the source B index to data
                 * size. If there is no transmit data, set the transmit
                 * register as the source of data, and set the source B index
                 * to zero.
                 *
                 * The destination is always the transmit register itself. And
                 * the destination never increments.
                 */

                if (t->tx_buf) {
                        t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
                                        davinci_spi->wcount, DMA_TO_DEVICE);
                        if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                                dev_dbg(sdev, "Unable to DMA map %d bytes "
                                                "TX buffer\n",
                                                davinci_spi->wcount);
                                return -ENOMEM;
                        }
                }

                param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_tx_channel);
                param.src = t->tx_buf ? t->tx_dma : tx_reg;
                param.a_b_cnt = davinci_spi->wcount << 16 | data_type;
                param.dst = tx_reg;
                param.src_dst_bidx = t->tx_buf ? data_type : 0;
                param.link_bcntrld = 0xffff;
                param.src_dst_cidx = 0;
                param.ccnt = 1;
                edma_write_slot(davinci_spi_dma->dma_tx_channel, &param);
                edma_link(davinci_spi_dma->dma_tx_channel,
                                davinci_spi_dma->dummy_param_slot);

                /*
                 * Receive DMA setup
                 *
                 * If there is a receive buffer, use it to receive data. If
                 * none is provided, use a temporary receive buffer. Set the
                 * destination B index to 0 so effectively only one byte is
                 * used in the temporary buffer (the address does not
                 * increment).
                 *
                 * The source of receive data is the receive data register.
                 * The source address never increments.
                 */

                if (t->rx_buf) {
                        rx_buf = t->rx_buf;
                        rx_buf_count = davinci_spi->rcount;
                } else {
                        rx_buf = davinci_spi->rx_tmp_buf;
                        rx_buf_count = sizeof(davinci_spi->rx_tmp_buf);
                }

                t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
                                                        DMA_FROM_DEVICE);
                if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                        dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
                                        rx_buf_count);
                        if (t->tx_buf)
                                dma_unmap_single(NULL, t->tx_dma,
                                                davinci_spi->wcount,
                                                DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                param.opt = TCINTEN | EDMA_TCC(davinci_spi_dma->dma_rx_channel);
                param.src = rx_reg;
                param.a_b_cnt = davinci_spi->rcount << 16 | data_type;
                param.dst = t->rx_dma;
                param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
                param.link_bcntrld = 0xffff;
                param.src_dst_cidx = 0;
                param.ccnt = 1;
                edma_write_slot(davinci_spi_dma->dma_rx_channel, &param);

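                /*
                 * Presumably a workaround for a chip-select hold erratum
                 * flagged by pdata->cshold_bug: rewrite the upper half of
                 * SPIDAT1 (CSHOLD and chip-select bits) before the DMA is
                 * started.
                 */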
                if (pdata->cshold_bug)
                        iowrite16(data1_reg_val >> 16,
                                        davinci_spi->base + SPIDAT1 + 2);

                edma_start(davinci_spi_dma->dma_rx_channel);
                edma_start(davinci_spi_dma->dma_tx_channel);
                set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
        }

        /* Wait for the transfer to complete */
        if (spicfg->io_type != SPI_IO_TYPE_POLL) {
                wait_for_completion_interruptible(&(davinci_spi->done));
        } else {
                while (davinci_spi->rcount > 0 || davinci_spi->wcount > 0) {
                        errors = davinci_spi_process_events(davinci_spi);
                        if (errors)
                                break;
                        cpu_relax();
                }
        }

        clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
        if (spicfg->io_type == SPI_IO_TYPE_DMA) {

                if (t->tx_buf)
                        dma_unmap_single(NULL, t->tx_dma, davinci_spi->wcount,
                                                        DMA_TO_DEVICE);

                dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
                                                        DMA_FROM_DEVICE);

                clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
        }

        clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

        /*
         * Check for bit error, desync error, parity error, timeout error and
         * receive overflow errors.
         */
        if (errors) {
                ret = davinci_spi_check_error(davinci_spi, errors);
                WARN(!ret, "%s: error reported but no error found!\n",
                                                        dev_name(&spi->dev));
                return ret;
        }

        if (davinci_spi->rcount != 0 || davinci_spi->wcount != 0) {
                dev_err(sdev, "SPI data transfer error\n");
                return -EIO;
        }

        return t->len;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for the SPI Master controller davinci_spi
 *
 * The ISR processes any pending receive or transmit events. If an error is
 * detected, or once the remaining transfer counts reach zero, it signals
 * completion so that davinci_spi_bufs() can proceed.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
        struct davinci_spi *davinci_spi = context_data;
        int status;

        status = davinci_spi_process_events(davinci_spi);
        if (unlikely(status != 0))
                clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKINT);

        if ((!davinci_spi->rcount && !davinci_spi->wcount) || status)
                complete(&davinci_spi->done);

        return IRQ_HANDLED;
}

static int davinci_spi_request_dma(struct davinci_spi *davinci_spi)
{
        int r;
        struct davinci_spi_dma *davinci_spi_dma = &davinci_spi->dma_channels;

        r = edma_alloc_channel(davinci_spi_dma->dma_rx_channel,
                                davinci_spi_dma_callback, davinci_spi,
                                davinci_spi_dma->eventq);
        if (r < 0) {
                pr_err("Unable to request DMA channel for SPI RX\n");
                r = -EAGAIN;
                goto rx_dma_failed;
        }

        r = edma_alloc_channel(davinci_spi_dma->dma_tx_channel,
                                davinci_spi_dma_callback, davinci_spi,
                                davinci_spi_dma->eventq);
        if (r < 0) {
                pr_err("Unable to request DMA channel for SPI TX\n");
                r = -EAGAIN;
                goto tx_dma_failed;
        }

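        /*
         * Allocate a dummy PaRAM slot and link it to itself; the TX channel
         * is linked to this slot in davinci_spi_bufs() so that a completed
         * transfer reloads a harmless no-op parameter set rather than a
         * null link (assumed rationale, based on how the slot is used).
         */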
        r = edma_alloc_slot(EDMA_CTLR(davinci_spi_dma->dma_tx_channel),
                EDMA_SLOT_ANY);
        if (r < 0) {
                pr_err("Unable to request SPI TX DMA param slot\n");
                r = -EAGAIN;
                goto param_failed;
        }
        davinci_spi_dma->dummy_param_slot = r;
        edma_link(davinci_spi_dma->dummy_param_slot,
                        davinci_spi_dma->dummy_param_slot);

        return 0;
param_failed:
        edma_free_channel(davinci_spi_dma->dma_tx_channel);
tx_dma_failed:
        edma_free_channel(davinci_spi_dma->dma_rx_channel);
rx_dma_failed:
        return r;
}

/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * According to the Linux Device Model, this function is invoked by the core
 * with a platform_device struct that contains the device specific info.
 * It maps the SPI controller's memory, registers the IRQ, resets the SPI
 * controller and sets its registers to default values. It then invokes
 * spi_bitbang_start() to create the work queue through which client drivers
 * queue their transfers.
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;
        struct resource *r, *mem;
        resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
        resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
        resource_size_t dma_eventq = SPI_NO_RESOURCE;
        int i = 0, ret = 0;
        u32 spipc0;

        pdata = pdev->dev.platform_data;
        if (pdata == NULL) {
                ret = -ENODEV;
                goto err;
        }

        master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
        if (master == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        dev_set_drvdata(&pdev->dev, master);

        davinci_spi = spi_master_get_devdata(master);
        if (davinci_spi == NULL) {
                ret = -ENOENT;
                goto free_master;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL) {
                ret = -ENOENT;
                goto free_master;
        }

        davinci_spi->pbase = r->start;
        davinci_spi->region_size = resource_size(r);
        davinci_spi->pdata = pdata;

        mem = request_mem_region(r->start, davinci_spi->region_size,
                                        pdev->name);
        if (mem == NULL) {
                ret = -EBUSY;
                goto free_master;
        }

        davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
        if (davinci_spi->base == NULL) {
                ret = -ENOMEM;
                goto release_region;
        }

        davinci_spi->irq = platform_get_irq(pdev, 0);
        if (davinci_spi->irq <= 0) {
                ret = -EINVAL;
                goto unmap_io;
        }

        ret = request_irq(davinci_spi->irq, davinci_spi_irq, 0,
                                dev_name(&pdev->dev), davinci_spi);
        if (ret)
                goto unmap_io;

        davinci_spi->bitbang.master = spi_master_get(master);
        if (davinci_spi->bitbang.master == NULL) {
                ret = -ENODEV;
                goto irq_free;
        }

        davinci_spi->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(davinci_spi->clk)) {
                ret = -ENODEV;
                goto put_master;
        }
        clk_enable(davinci_spi->clk);

        master->bus_num = pdev->id;
        master->num_chipselect = pdata->num_chipselect;
        master->setup = davinci_spi_setup;

        davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
        davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;

        davinci_spi->version = pdata->version;

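        /*
         * Mode bits supported in addition to CPOL/CPHA; SPI_READY is only
         * advertised on version 2 hardware, which has the SPIENA handshake
         * pin.
         */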
        davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
        if (davinci_spi->version == SPI_VERSION_2)
                davinci_spi->bitbang.flags |= SPI_READY;

        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (r)
                dma_rx_chan = r->start;
        r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (r)
                dma_tx_chan = r->start;
        r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
        if (r)
                dma_eventq = r->start;

        davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs;
        if (dma_rx_chan != SPI_NO_RESOURCE &&
            dma_tx_chan != SPI_NO_RESOURCE &&
            dma_eventq != SPI_NO_RESOURCE) {
                davinci_spi->dma_channels.dma_rx_channel = dma_rx_chan;
                davinci_spi->dma_channels.dma_tx_channel = dma_tx_chan;
                davinci_spi->dma_channels.eventq = dma_eventq;

                ret = davinci_spi_request_dma(davinci_spi);
                if (ret)
                        goto free_clk;

                dev_info(&pdev->dev, "DMA: supported\n");
                dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
                                "event queue: %d\n", dma_rx_chan, dma_tx_chan,
                                dma_eventq);
        }

        davinci_spi->get_rx = davinci_spi_rx_buf_u8;
        davinci_spi->get_tx = davinci_spi_tx_buf_u8;

        init_completion(&davinci_spi->done);

        /* Reset In/OUT SPI module */
        iowrite32(0, davinci_spi->base + SPIGCR0);
        udelay(100);
        iowrite32(1, davinci_spi->base + SPIGCR0);

        /* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
        spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
        iowrite32(spipc0, davinci_spi->base + SPIPC0);

        /* initialize chip selects */
        if (pdata->chip_sel) {
                for (i = 0; i < pdata->num_chipselect; i++) {
                        if (pdata->chip_sel[i] != SPI_INTERN_CS)
                                gpio_direction_output(pdata->chip_sel[i], 1);
                }
        }

        if (pdata->intr_line)
                iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
        else
                iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);

        iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);

        /* master mode default */
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

        ret = spi_bitbang_start(&davinci_spi->bitbang);
        if (ret)
                goto free_dma;

        dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);

        return ret;

free_dma:
        edma_free_channel(davinci_spi->dma_channels.dma_tx_channel);
        edma_free_channel(davinci_spi->dma_channels.dma_rx_channel);
        edma_free_slot(davinci_spi->dma_channels.dummy_param_slot);
free_clk:
        clk_disable(davinci_spi->clk);
        clk_put(davinci_spi->clk);
put_master:
        spi_master_put(master);
irq_free:
        free_irq(davinci_spi->irq, davinci_spi);
unmap_io:
        iounmap(davinci_spi->base);
release_region:
        release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
free_master:
        kfree(master);
err:
        return ret;
}

/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function reverses the actions of davinci_spi_probe(): it frees the
 * IRQ and the SPI controller's memory region, and calls spi_bitbang_stop()
 * to destroy the work queue that was created by spi_bitbang_start().
 */
static int __exit davinci_spi_remove(struct platform_device *pdev)
{
        struct davinci_spi *davinci_spi;
        struct spi_master *master;

        master = dev_get_drvdata(&pdev->dev);
        davinci_spi = spi_master_get_devdata(master);

        spi_bitbang_stop(&davinci_spi->bitbang);

        clk_disable(davinci_spi->clk);
        clk_put(davinci_spi->clk);
        spi_master_put(master);
        free_irq(davinci_spi->irq, davinci_spi);
        iounmap(davinci_spi->base);
        release_mem_region(davinci_spi->pbase, davinci_spi->region_size);

        return 0;
}

static struct platform_driver davinci_spi_driver = {
        .driver = {
                .name = "spi_davinci",
                .owner = THIS_MODULE,
        },
        .remove = __exit_p(davinci_spi_remove),
};

static int __init davinci_spi_init(void)
{
        return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);

static void __exit davinci_spi_exit(void)
{
        platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");