blob: 2d80e36f5015967f40ea02a9a17f8ac2f08272cd [file] [log] [blame]
Mika Westerberg011f23a2010-05-06 04:47:04 +00001/*
2 * Driver for Cirrus Logic EP93xx SPI controller.
3 *
Mika Westerberg626a96d2011-05-29 13:10:06 +03004 * Copyright (C) 2010-2011 Mika Westerberg
Mika Westerberg011f23a2010-05-06 04:47:04 +00005 *
6 * Explicit FIFO handling code was inspired by amba-pl022 driver.
7 *
8 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
9 *
10 * For more information about the SPI controller see documentation on Cirrus
11 * Logic web site:
12 * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/io.h>
20#include <linux/clk.h>
21#include <linux/err.h>
22#include <linux/delay.h>
23#include <linux/device.h>
Mika Westerberg626a96d2011-05-29 13:10:06 +030024#include <linux/dmaengine.h>
Mika Westerberg011f23a2010-05-06 04:47:04 +000025#include <linux/bitops.h>
26#include <linux/interrupt.h>
Mika Westerberg5bdb76132011-10-15 21:40:09 +030027#include <linux/module.h>
Mika Westerberg011f23a2010-05-06 04:47:04 +000028#include <linux/platform_device.h>
Mika Westerberg011f23a2010-05-06 04:47:04 +000029#include <linux/sched.h>
Mika Westerberg626a96d2011-05-29 13:10:06 +030030#include <linux/scatterlist.h>
H Hartley Sweeten55f0cd3f2017-02-16 13:07:37 -070031#include <linux/gpio.h>
Mika Westerberg011f23a2010-05-06 04:47:04 +000032#include <linux/spi/spi.h>
33
Arnd Bergmanna3b29242012-08-24 15:12:11 +020034#include <linux/platform_data/dma-ep93xx.h>
35#include <linux/platform_data/spi-ep93xx.h>
Mika Westerberg011f23a2010-05-06 04:47:04 +000036
/* Register offsets and bit definitions for the EP93xx SSP block */

/* Control register 0: SPI mode, data size and serial clock rate */
#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6	/* CPOL/CPHA mode field */
#define SSPCR0_SCR_SHIFT	8	/* serial clock rate divisor field */

/* Control register 1: interrupt enables, loopback, port enable */
#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)	/* RX FIFO interrupt enable */
#define SSPCR1_TIE		BIT(1)	/* TX FIFO interrupt enable */
#define SSPCR1_RORIE		BIT(2)	/* RX overrun interrupt enable */
#define SSPCR1_LBM		BIT(3)	/* loopback mode */
#define SSPCR1_SSE		BIT(4)	/* synchronous serial port enable */
#define SSPCR1_MS		BIT(5)	/* master/slave select */
#define SSPCR1_SOD		BIT(6)	/* slave output disable */

/* Data register: writes queue to the TX FIFO, reads pop the RX FIFO */
#define SSPDR			0x0008

/* Status register (read-only FIFO/busy flags) */
#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)	/* TX FIFO empty */
#define SSPSR_TNF		BIT(1)	/* TX FIFO not full */
#define SSPSR_RNE		BIT(2)	/* RX FIFO not empty */
#define SSPSR_RFF		BIT(3)	/* RX FIFO full */
#define SSPSR_BSY		BIT(4)	/* controller busy */
/* Clock prescale register (the even "cpsr" divider) */
#define SSPCPSR			0x0010

/* Interrupt identification register; writing it clears the overrun */
#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)	/* RX FIFO interrupt status */
#define SSPIIR_TIS		BIT(1)	/* TX FIFO interrupt status */
#define SSPIIR_RORIS		BIT(2)	/* RX overrun interrupt status */
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
70
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @clk: clock for the controller
 * @mmio: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register (DMA target)
 * @wait: wait here until given transfer is completed
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 */
struct ep93xx_spi {
	struct clk *clk;
	void __iomem *mmio;
	unsigned long sspdr_phys;
	struct completion wait;
	struct spi_message *current_msg;
	size_t tx;
	size_t rx;
	size_t fifo_level;
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	struct ep93xx_dma_data dma_rx_data;
	struct ep93xx_dma_data dma_tx_data;
	struct sg_table rx_sgt;
	struct sg_table tx_sgt;
	void *zeropage;
};
108
/*
 * Converts bits per word to the CR0.DSS (data size select) value; the
 * hardware field encodes the frame size as (bits - 1).
 */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
111
Mika Westerberg011f23a2010-05-06 04:47:04 +0000112/**
113 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
H Hartley Sweeten48738832017-08-09 08:51:29 +1200114 * @master: SPI master
Mika Westerberg011f23a2010-05-06 04:47:04 +0000115 * @rate: desired SPI output clock rate
H Hartley Sweetenf7ef1da2013-07-02 10:10:29 -0700116 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
117 * @div_scr: pointer to return the scr divider
Mika Westerberg011f23a2010-05-06 04:47:04 +0000118 */
H Hartley Sweeten48738832017-08-09 08:51:29 +1200119static int ep93xx_spi_calc_divisors(struct spi_master *master,
Axel Lin56fc0b42014-02-08 23:52:26 +0800120 u32 rate, u8 *div_cpsr, u8 *div_scr)
Mika Westerberg011f23a2010-05-06 04:47:04 +0000121{
H Hartley Sweeten48738832017-08-09 08:51:29 +1200122 struct ep93xx_spi *espi = spi_master_get_devdata(master);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000123 unsigned long spi_clk_rate = clk_get_rate(espi->clk);
124 int cpsr, scr;
125
126 /*
127 * Make sure that max value is between values supported by the
128 * controller. Note that minimum value is already checked in
H Hartley Sweeten84ddb3c2013-07-08 09:12:37 -0700129 * ep93xx_spi_transfer_one_message().
Mika Westerberg011f23a2010-05-06 04:47:04 +0000130 */
Axel Lin56fc0b42014-02-08 23:52:26 +0800131 rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000132
133 /*
134 * Calculate divisors so that we can get speed according the
135 * following formula:
136 * rate = spi_clock_rate / (cpsr * (1 + scr))
137 *
138 * cpsr must be even number and starts from 2, scr can be any number
139 * between 0 and 255.
140 */
141 for (cpsr = 2; cpsr <= 254; cpsr += 2) {
142 for (scr = 0; scr <= 255; scr++) {
143 if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
H Hartley Sweetenf7ef1da2013-07-02 10:10:29 -0700144 *div_scr = (u8)scr;
145 *div_cpsr = (u8)cpsr;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000146 return 0;
147 }
148 }
149 }
150
151 return -EINVAL;
152}
153
H Hartley Sweeten55f0cd3f2017-02-16 13:07:37 -0700154static void ep93xx_spi_cs_control(struct spi_device *spi, bool enable)
Mika Westerberg011f23a2010-05-06 04:47:04 +0000155{
H Hartley Sweeten55f0cd3f2017-02-16 13:07:37 -0700156 if (spi->mode & SPI_CS_HIGH)
157 enable = !enable;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000158
H Hartley Sweeten55f0cd3f2017-02-16 13:07:37 -0700159 if (gpio_is_valid(spi->cs_gpio))
160 gpio_set_value(spi->cs_gpio, !enable);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000161}
162
/*
 * ep93xx_spi_chip_setup() - configure the controller for one transfer
 * @master: SPI master
 * @spi: SPI device being addressed
 * @xfer: transfer whose speed and word size are to be programmed
 *
 * Computes clock divisors for @xfer->speed_hz and programs SSPCPSR and
 * SSPCR0 (clock rate, CPOL/CPHA mode, data size). Returns %0 on success
 * or a negative error if no suitable divisors exist.
 */
static int ep93xx_spi_chip_setup(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
	u8 div_cpsr = 0;
	u8 div_scr = 0;
	u16 cr0;
	int err;

	err = ep93xx_spi_calc_divisors(master, xfer->speed_hz,
				       &div_cpsr, &div_scr);
	if (err)
		return err;

	/* Assemble CR0: serial clock rate, SPI mode bits, data size */
	cr0 = div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= dss;

	dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		spi->mode, div_cpsr, div_scr, dss);
	dev_dbg(&master->dev, "setup: cr0 %#x\n", cr0);

	/* Prescaler must be in place before CR0's scr divider applies. */
	writel(div_cpsr, espi->mmio + SSPCPSR);
	writel(cr0, espi->mmio + SSPCR0);

	return 0;
}
192
Mika Westerberg011f23a2010-05-06 04:47:04 +0000193static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
194{
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200195 u32 val = 0;
196
H Hartley Sweeten701c3582013-07-02 10:07:01 -0700197 if (t->bits_per_word > 8) {
Mika Westerberg011f23a2010-05-06 04:47:04 +0000198 if (t->tx_buf)
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200199 val = ((u16 *)t->tx_buf)[espi->tx];
200 espi->tx += 2;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000201 } else {
Mika Westerberg011f23a2010-05-06 04:47:04 +0000202 if (t->tx_buf)
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200203 val = ((u8 *)t->tx_buf)[espi->tx];
204 espi->tx += 1;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000205 }
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200206 writel(val, espi->mmio + SSPDR);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000207}
208
209static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
210{
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200211 u32 val;
212
213 val = readl(espi->mmio + SSPDR);
H Hartley Sweeten701c3582013-07-02 10:07:01 -0700214 if (t->bits_per_word > 8) {
Mika Westerberg011f23a2010-05-06 04:47:04 +0000215 if (t->rx_buf)
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200216 ((u16 *)t->rx_buf)[espi->rx] = val;
217 espi->rx += 2;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000218 } else {
Mika Westerberg011f23a2010-05-06 04:47:04 +0000219 if (t->rx_buf)
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200220 ((u8 *)t->rx_buf)[espi->rx] = val;
221 espi->rx += 1;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000222 }
223}
224
/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @master: SPI master
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
static int ep93xx_spi_read_write(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	/* the transfer is complete once every byte has been received back */
	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}
259
/*
 * ep93xx_spi_pio_transfer() - run the current transfer using PIO
 * @master: SPI master
 *
 * Primes the TX FIFO and, if frames remain, enables the RX/TX/overrun
 * interrupts and sleeps until the interrupt handler completes &espi->wait.
 * Small transfers that fit entirely in the FIFO finish without interrupts.
 */
static void ep93xx_spi_pio_transfer(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(master)) {
		u32 val;

		val = readl(espi->mmio + SSPCR1);
		val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
		writel(val, espi->mmio + SSPCR1);

		wait_for_completion(&espi->wait);
	}
}
278
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @master: SPI master
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct spi_master *master,
		       enum dma_transfer_direction dir)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	/* DMA bus width must match the SPI frame size of this transfer. */
	if (t->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	/* The device-side address is always the SSPDR data register. */
	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	/*
	 * Fill the scatterlist page by page; when no buffer was supplied
	 * (NULL), every entry points at the shared zeropage instead.
	 */
	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	/* All bytes must have been consumed by the loop above. */
	if (WARN_ON(len)) {
		dev_warn(&master->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
383
/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @master: SPI master
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct spi_master *master,
				  enum dma_transfer_direction dir)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct dma_chan *chan;
	struct sg_table *sgt;

	/* Pick the channel/sg table matching the direction used in prepare. */
	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}
409
/* DMA completion callback; @callback_param is the &espi->wait completion. */
static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}
414
/*
 * ep93xx_spi_dma_transfer() - run the current transfer using DMA
 * @master: SPI master
 *
 * Prepares RX and TX descriptors, submits both and sleeps until the RX
 * side completes (its callback signals &espi->wait). On any preparation
 * failure @msg->status is set and the transfer is abandoned.
 */
static void ep93xx_spi_dma_transfer(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		/* Undo the RX mapping set up above before bailing out. */
		ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
		dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
}
452
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @master: SPI master
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	int err;

	/* The interrupt and DMA paths locate the active transfer here. */
	msg->state = t;

	err = ep93xx_spi_chip_setup(master, msg->spi, t);
	if (err) {
		dev_err(&master->dev,
			"failed to setup chip for transfer\n");
		msg->status = err;
		return;
	}

	/* Reset byte counters for the new transfer. */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(master);
	else
		ep93xx_spi_pio_transfer(master);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
}
523
/*
 * ep93xx_spi_process_message() - process one SPI message
 * @master: SPI master
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	unsigned long timeout;
	struct spi_transfer *t;

	/*
	 * Just to be sure: flush any stale data from the RX FIFO, bounded
	 * by SPI_TIMEOUT in case the hardware keeps producing frames.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&master->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		readl(espi->mmio + SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Assert the chipselect.
	 */
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(master, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason).
	 * Deselect the device.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
}
580
/*
 * ep93xx_spi_transfer_one_message() - SPI core message-pump callback
 * @master: SPI master
 * @msg: message to transfer
 *
 * Runs @msg synchronously and reports the result back to the core via
 * spi_finalize_current_message(). Always returns %0; per-message errors
 * are carried in @msg->status.
 */
static int ep93xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	/* Publish the message so the interrupt handler can reach it. */
	espi->current_msg = msg;
	ep93xx_spi_process_message(master, msg);
	espi->current_msg = NULL;

	spi_finalize_current_message(master);

	return 0;
}
598
/*
 * ep93xx_spi_interrupt() - SSP interrupt handler
 * @irq: interrupt number
 * @dev_id: the SPI master registered with the interrupt
 *
 * Services RX/TX FIFO interrupts for the PIO path and aborts the current
 * message on receive overrun. When the transfer finishes (or fails) the
 * interrupts are masked and the sleeping transfer path is woken.
 */
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
		/* clear the overrun interrupt */
		writel(0, espi->mmio + SSPICR);
		dev_warn(&master->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(master)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	val = readl(espi->mmio + SSPCR1);
	val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

	complete(&espi->wait);

	return IRQ_HANDLED;
}
643
/*
 * ep93xx_spi_prepare_hardware() - power up the SSP block
 * @master: SPI master
 *
 * Enables the controller clock and sets SSPCR1.SSE so the port is
 * operational before the first message is processed.
 */
static int ep93xx_spi_prepare_hardware(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;
	int ret;

	ret = clk_enable(espi->clk);
	if (ret)
		return ret;

	val = readl(espi->mmio + SSPCR1);
	val |= SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	return 0;
}
660
/*
 * ep93xx_spi_unprepare_hardware() - power down the SSP block
 * @master: SPI master
 *
 * Clears SSPCR1.SSE and gates the controller clock; the inverse of
 * ep93xx_spi_prepare_hardware().
 */
static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;

	val = readl(espi->mmio + SSPCR1);
	val &= ~SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	clk_disable(espi->clk);

	return 0;
}
674
Mika Westerberg626a96d2011-05-29 13:10:06 +0300675static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
676{
677 if (ep93xx_dma_chan_is_m2p(chan))
678 return false;
679
680 chan->private = filter_param;
681 return true;
682}
683
/*
 * ep93xx_spi_setup_dma() - acquire DMA channels and the shared zeropage
 * @espi: ep93xx SPI controller struct
 *
 * Allocates the zero-filled page used as a substitute buffer and requests
 * the RX and TX slave channels. Returns %0 on success; on failure every
 * resource acquired so far is released before the error is returned.
 */
static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}
728
/*
 * ep93xx_spi_release_dma() - release DMA channels, sg tables and zeropage
 * @espi: ep93xx SPI controller struct
 *
 * Inverse of ep93xx_spi_setup_dma(); each resource is checked before
 * release, so this is safe when DMA setup failed or never ran.
 */
static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}
743
Grant Likelyfd4a3192012-12-07 16:57:14 +0000744static int ep93xx_spi_probe(struct platform_device *pdev)
Mika Westerberg011f23a2010-05-06 04:47:04 +0000745{
746 struct spi_master *master;
747 struct ep93xx_spi_info *info;
748 struct ep93xx_spi *espi;
749 struct resource *res;
Hannu Heikkinen6d6467e2012-05-09 17:26:26 +0300750 int irq;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000751 int error;
H Hartley Sweeten55f0cd3f2017-02-16 13:07:37 -0700752 int i;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000753
Jingoo Han8074cf02013-07-30 16:58:59 +0900754 info = dev_get_platdata(&pdev->dev);
H Hartley Sweeten55f0cd3f2017-02-16 13:07:37 -0700755 if (!info) {
756 dev_err(&pdev->dev, "missing platform data\n");
757 return -EINVAL;
758 }
Mika Westerberg011f23a2010-05-06 04:47:04 +0000759
H Hartley Sweeten48a77762013-07-02 10:07:53 -0700760 irq = platform_get_irq(pdev, 0);
761 if (irq < 0) {
762 dev_err(&pdev->dev, "failed to get irq resources\n");
763 return -EBUSY;
764 }
765
766 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
767 if (!res) {
768 dev_err(&pdev->dev, "unable to get iomem resource\n");
769 return -ENODEV;
770 }
771
Mika Westerberg011f23a2010-05-06 04:47:04 +0000772 master = spi_alloc_master(&pdev->dev, sizeof(*espi));
H Hartley Sweetenb2d185e2013-07-02 10:08:59 -0700773 if (!master)
Mika Westerberg011f23a2010-05-06 04:47:04 +0000774 return -ENOMEM;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000775
H Hartley Sweeten16779622017-08-09 08:51:27 +1200776 master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
777 master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
H Hartley Sweeten84ddb3c2013-07-08 09:12:37 -0700778 master->transfer_one_message = ep93xx_spi_transfer_one_message;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000779 master->bus_num = pdev->id;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000780 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
Stephen Warren24778be2013-05-21 20:36:35 -0600781 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000782
H Hartley Sweeten55f0cd3f2017-02-16 13:07:37 -0700783 master->num_chipselect = info->num_chipselect;
784 master->cs_gpios = devm_kzalloc(&master->dev,
785 sizeof(int) * master->num_chipselect,
786 GFP_KERNEL);
787 if (!master->cs_gpios) {
788 error = -ENOMEM;
789 goto fail_release_master;
790 }
791
792 for (i = 0; i < master->num_chipselect; i++) {
793 master->cs_gpios[i] = info->chipselect[i];
794
795 if (!gpio_is_valid(master->cs_gpios[i]))
796 continue;
797
798 error = devm_gpio_request_one(&pdev->dev, master->cs_gpios[i],
799 GPIOF_OUT_INIT_HIGH,
800 "ep93xx-spi");
801 if (error) {
802 dev_err(&pdev->dev, "could not request cs gpio %d\n",
803 master->cs_gpios[i]);
804 goto fail_release_master;
805 }
806 }
807
Mika Westerberg011f23a2010-05-06 04:47:04 +0000808 platform_set_drvdata(pdev, master);
809
810 espi = spi_master_get_devdata(master);
811
H Hartley Sweetene6eb8d92013-07-02 10:08:21 -0700812 espi->clk = devm_clk_get(&pdev->dev, NULL);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000813 if (IS_ERR(espi->clk)) {
814 dev_err(&pdev->dev, "unable to get spi clock\n");
815 error = PTR_ERR(espi->clk);
816 goto fail_release_master;
817 }
818
Mika Westerberg011f23a2010-05-06 04:47:04 +0000819 init_completion(&espi->wait);
820
821 /*
822 * Calculate maximum and minimum supported clock rates
823 * for the controller.
824 */
Axel Lin56fc0b42014-02-08 23:52:26 +0800825 master->max_speed_hz = clk_get_rate(espi->clk) / 2;
826 master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000827
Mika Westerberg626a96d2011-05-29 13:10:06 +0300828 espi->sspdr_phys = res->start + SSPDR;
Hannu Heikkinen6d6467e2012-05-09 17:26:26 +0300829
H Hartley Sweeten12329782017-08-09 08:51:25 +1200830 espi->mmio = devm_ioremap_resource(&pdev->dev, res);
831 if (IS_ERR(espi->mmio)) {
832 error = PTR_ERR(espi->mmio);
H Hartley Sweetene6eb8d92013-07-02 10:08:21 -0700833 goto fail_release_master;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000834 }
835
Hannu Heikkinen6d6467e2012-05-09 17:26:26 +0300836 error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
H Hartley Sweeten48738832017-08-09 08:51:29 +1200837 0, "ep93xx-spi", master);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000838 if (error) {
839 dev_err(&pdev->dev, "failed to request irq\n");
H Hartley Sweetene6eb8d92013-07-02 10:08:21 -0700840 goto fail_release_master;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000841 }
842
Mika Westerberg626a96d2011-05-29 13:10:06 +0300843 if (info->use_dma && ep93xx_spi_setup_dma(espi))
844 dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
845
Mika Westerberg011f23a2010-05-06 04:47:04 +0000846 /* make sure that the hardware is disabled */
H Hartley Sweeten8447e472017-08-09 08:51:26 +1200847 writel(0, espi->mmio + SSPCR1);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000848
Jingoo Han434eaf32013-09-24 13:30:41 +0900849 error = devm_spi_register_master(&pdev->dev, master);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000850 if (error) {
851 dev_err(&pdev->dev, "failed to register SPI master\n");
H Hartley Sweeten84ddb3c2013-07-08 09:12:37 -0700852 goto fail_free_dma;
Mika Westerberg011f23a2010-05-06 04:47:04 +0000853 }
854
855 dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
Hannu Heikkinen6d6467e2012-05-09 17:26:26 +0300856 (unsigned long)res->start, irq);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000857
858 return 0;
859
Mika Westerberg626a96d2011-05-29 13:10:06 +0300860fail_free_dma:
861 ep93xx_spi_release_dma(espi);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000862fail_release_master:
863 spi_master_put(master);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000864
865 return error;
866}
867
Grant Likelyfd4a3192012-12-07 16:57:14 +0000868static int ep93xx_spi_remove(struct platform_device *pdev)
Mika Westerberg011f23a2010-05-06 04:47:04 +0000869{
870 struct spi_master *master = platform_get_drvdata(pdev);
871 struct ep93xx_spi *espi = spi_master_get_devdata(master);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000872
Mika Westerberg626a96d2011-05-29 13:10:06 +0300873 ep93xx_spi_release_dma(espi);
Mika Westerberg011f23a2010-05-06 04:47:04 +0000874
Mika Westerberg011f23a2010-05-06 04:47:04 +0000875 return 0;
876}
877
/* Platform driver glue: binds to devices named "ep93xx-spi". */
static struct platform_driver ep93xx_spi_driver = {
	.driver = {
		.name = "ep93xx-spi",
	},
	.probe = ep93xx_spi_probe,
	.remove = ep93xx_spi_remove,
};
/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");