// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH MSIOF SPI Controller Interface
 *
 * Copyright (c) 2009 Magnus Damm
 * Copyright (C) 2014 Renesas Electronics Corporation
 * Copyright (C) 2014-2017 Glider bvba
 */

#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>

#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>

#include <asm/unaligned.h>

struct sh_msiof_chipdata {
	u32 bits_per_word_mask;
	u16 tx_fifo_size;
	u16 rx_fifo_size;
	u16 ctlr_flags;
	u16 min_div_pow;
};

struct sh_msiof_spi_priv {
	struct spi_controller *ctlr;
	void __iomem *mapbase;
	struct clk *clk;
	struct platform_device *pdev;
	struct sh_msiof_spi_info *info;
	struct completion done;
	struct completion done_txdma;
	unsigned int tx_fifo_size;
	unsigned int rx_fifo_size;
	unsigned int min_div_pow;
	void *tx_dma_page;
	void *rx_dma_page;
	dma_addr_t tx_dma_addr;
	dma_addr_t rx_dma_addr;
	unsigned short unused_ss;
	bool native_cs_inited;
	bool native_cs_high;
	bool slave_aborted;
};

#define MAX_SS	3	/* Maximum number of native chip selects */

#define TMDR1	0x00	/* Transmit Mode Register 1 */
#define TMDR2	0x04	/* Transmit Mode Register 2 */
#define TMDR3	0x08	/* Transmit Mode Register 3 */
#define RMDR1	0x10	/* Receive Mode Register 1 */
#define RMDR2	0x14	/* Receive Mode Register 2 */
#define RMDR3	0x18	/* Receive Mode Register 3 */
#define TSCR	0x20	/* Transmit Clock Select Register */
#define RSCR	0x22	/* Receive Clock Select Register (SH, A1, APE6) */
#define CTR	0x28	/* Control Register */
#define FCTR	0x30	/* FIFO Control Register */
#define STR	0x40	/* Status Register */
#define IER	0x44	/* Interrupt Enable Register */
#define TDR1	0x48	/* Transmit Control Data Register 1 (SH, A1) */
#define TDR2	0x4c	/* Transmit Control Data Register 2 (SH, A1) */
#define TFDR	0x50	/* Transmit FIFO Data Register */
#define RDR1	0x58	/* Receive Control Data Register 1 (SH, A1) */
#define RDR2	0x5c	/* Receive Control Data Register 2 (SH, A1) */
#define RFDR	0x60	/* Receive FIFO Data Register */

/* TMDR1 and RMDR1 */
#define MDR1_TRMD	   BIT(31)	/* Transfer Mode (1 = Master mode) */
#define MDR1_SYNCMD_MASK   GENMASK(29, 28) /* SYNC Mode */
#define MDR1_SYNCMD_SPI	   (2 << 28)	/* Level mode/SPI */
#define MDR1_SYNCMD_LR	   (3 << 28)	/* L/R mode */
#define MDR1_SYNCAC_SHIFT  25		/* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT  24		/* MSB/LSB First (1 = LSB first) */
#define MDR1_DTDL_SHIFT	   20		/* Data Pin Bit Delay for MSIOF_SYNC */
#define MDR1_SYNCDL_SHIFT  16		/* Frame Sync Signal Timing Delay */
#define MDR1_FLD_MASK	   GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT	   2
#define MDR1_XXSTP	   BIT(0)	/* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON	   BIT(30)	/* Transfer Signal Connection */
#define TMDR1_SYNCCH_MASK  GENMASK(27, 26) /* Sync Signal Channel Select */
#define TMDR1_SYNCCH_SHIFT 26		/* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */

/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i)	(((i) - 1) << 24) /* Data Size (8-32 bits) */
#define MDR2_WDLEN1(i)	(((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
#define MDR2_GRPMASK1	BIT(0)		  /* Group Output Mask 1 (SH, A1) */

/* TSCR and RSCR */
#define SCR_BRPS_MASK	GENMASK(12, 8)	/* Prescaler Setting (1-32) */
#define SCR_BRPS(i)	(((i) - 1) << 8)
#define SCR_BRDV_MASK	GENMASK(2, 0)	/* Baud Rate Generator's Division Ratio */
#define SCR_BRDV_DIV_2	0
#define SCR_BRDV_DIV_4	1
#define SCR_BRDV_DIV_8	2
#define SCR_BRDV_DIV_16	3
#define SCR_BRDV_DIV_32	4
#define SCR_BRDV_DIV_1	7

/* CTR */
#define CTR_TSCKIZ_MASK	GENMASK(31, 30)	/* Transmit Clock I/O Polarity Select */
#define CTR_TSCKIZ_SCK	BIT(31)		/* Disable SCK when TX disabled */
#define CTR_TSCKIZ_POL_SHIFT 30		/* Transmit Clock Polarity */
#define CTR_RSCKIZ_MASK	GENMASK(29, 28)	/* Receive Clock Polarity Select */
#define CTR_RSCKIZ_SCK	BIT(29)		/* Must match CTR_TSCKIZ_SCK */
#define CTR_RSCKIZ_POL_SHIFT 28		/* Receive Clock Polarity */
#define CTR_TEDG_SHIFT	27		/* Transmit Timing (1 = falling edge) */
#define CTR_REDG_SHIFT	26		/* Receive Timing (1 = falling edge) */
#define CTR_TXDIZ_MASK	GENMASK(23, 22)	/* Pin Output When TX is Disabled */
#define CTR_TXDIZ_LOW	(0 << 22)	/* 0 */
#define CTR_TXDIZ_HIGH	(1 << 22)	/* 1 */
#define CTR_TXDIZ_HIZ	(2 << 22)	/* High-impedance */
#define CTR_TSCKE	BIT(15)		/* Transmit Serial Clock Output Enable */
#define CTR_TFSE	BIT(14)		/* Transmit Frame Sync Signal Output Enable */
#define CTR_TXE		BIT(9)		/* Transmit Enable */
#define CTR_RXE		BIT(8)		/* Receive Enable */
#define CTR_TXRST	BIT(1)		/* Transmit Reset */
#define CTR_RXRST	BIT(0)		/* Receive Reset */

/* FCTR */
#define FCTR_TFWM_MASK	GENMASK(31, 29)	/* Transmit FIFO Watermark */
#define FCTR_TFWM_64	(0 << 29)	/* Transfer Request when 64 empty stages */
#define FCTR_TFWM_32	(1 << 29)	/* Transfer Request when 32 empty stages */
#define FCTR_TFWM_24	(2 << 29)	/* Transfer Request when 24 empty stages */
#define FCTR_TFWM_16	(3 << 29)	/* Transfer Request when 16 empty stages */
#define FCTR_TFWM_12	(4 << 29)	/* Transfer Request when 12 empty stages */
#define FCTR_TFWM_8	(5 << 29)	/* Transfer Request when 8 empty stages */
#define FCTR_TFWM_4	(6 << 29)	/* Transfer Request when 4 empty stages */
#define FCTR_TFWM_1	(7 << 29)	/* Transfer Request when 1 empty stage */
#define FCTR_TFUA_MASK	GENMASK(26, 20)	/* Transmit FIFO Usable Area */
#define FCTR_TFUA_SHIFT	20
#define FCTR_TFUA(i)	((i) << FCTR_TFUA_SHIFT)
#define FCTR_RFWM_MASK	GENMASK(15, 13)	/* Receive FIFO Watermark */
#define FCTR_RFWM_1	(0 << 13)	/* Transfer Request when 1 valid stage */
#define FCTR_RFWM_4	(1 << 13)	/* Transfer Request when 4 valid stages */
#define FCTR_RFWM_8	(2 << 13)	/* Transfer Request when 8 valid stages */
#define FCTR_RFWM_16	(3 << 13)	/* Transfer Request when 16 valid stages */
#define FCTR_RFWM_32	(4 << 13)	/* Transfer Request when 32 valid stages */
#define FCTR_RFWM_64	(5 << 13)	/* Transfer Request when 64 valid stages */
#define FCTR_RFWM_128	(6 << 13)	/* Transfer Request when 128 valid stages */
#define FCTR_RFWM_256	(7 << 13)	/* Transfer Request when 256 valid stages */
#define FCTR_RFUA_MASK	GENMASK(12, 4)	/* Receive FIFO Usable Area (0x40 = full) */
#define FCTR_RFUA_SHIFT	4
#define FCTR_RFUA(i)	((i) << FCTR_RFUA_SHIFT)

/* STR */
#define STR_TFEMP	BIT(29)	/* Transmit FIFO Empty */
#define STR_TDREQ	BIT(28)	/* Transmit Data Transfer Request */
#define STR_TEOF	BIT(23)	/* Frame Transmission End */
#define STR_TFSERR	BIT(21)	/* Transmit Frame Synchronization Error */
#define STR_TFOVF	BIT(20)	/* Transmit FIFO Overflow */
#define STR_TFUDF	BIT(19)	/* Transmit FIFO Underflow */
#define STR_RFFUL	BIT(13)	/* Receive FIFO Full */
#define STR_RDREQ	BIT(12)	/* Receive Data Transfer Request */
#define STR_REOF	BIT(7)	/* Frame Reception End */
#define STR_RFSERR	BIT(5)	/* Receive Frame Synchronization Error */
#define STR_RFUDF	BIT(4)	/* Receive FIFO Underflow */
#define STR_RFOVF	BIT(3)	/* Receive FIFO Overflow */

/* IER */
#define IER_TDMAE	BIT(31)	/* Transmit Data DMA Transfer Req. Enable */
#define IER_TFEMPE	BIT(29)	/* Transmit FIFO Empty Enable */
#define IER_TDREQE	BIT(28)	/* Transmit Data Transfer Request Enable */
#define IER_TEOFE	BIT(23)	/* Frame Transmission End Enable */
#define IER_TFSERRE	BIT(21)	/* Transmit Frame Sync Error Enable */
#define IER_TFOVFE	BIT(20)	/* Transmit FIFO Overflow Enable */
#define IER_TFUDFE	BIT(19)	/* Transmit FIFO Underflow Enable */
#define IER_RDMAE	BIT(15)	/* Receive Data DMA Transfer Req. Enable */
#define IER_RFFULE	BIT(13)	/* Receive FIFO Full Enable */
#define IER_RDREQE	BIT(12)	/* Receive Data Transfer Request Enable */
#define IER_REOFE	BIT(7)	/* Frame Reception End Enable */
#define IER_RFSERRE	BIT(5)	/* Receive Frame Sync Error Enable */
#define IER_RFUDFE	BIT(4)	/* Receive FIFO Underflow Enable */
#define IER_RFOVFE	BIT(3)	/* Receive FIFO Overflow Enable */

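/* TSCR and RSCR are the only 16-bit registers; all others are 32 bits wide */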
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
	switch (reg_offs) {
	case TSCR:
	case RSCR:
		return ioread16(p->mapbase + reg_offs);
	default:
		return ioread32(p->mapbase + reg_offs);
	}
}

static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
			   u32 value)
{
	switch (reg_offs) {
	case TSCR:
	case RSCR:
		iowrite16(value, p->mapbase + reg_offs);
		break;
	default:
		iowrite32(value, p->mapbase + reg_offs);
		break;
	}
}

static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
				    u32 clr, u32 set)
{
	u32 mask = clr | set;
	u32 data;

	data = sh_msiof_read(p, CTR);
	data &= ~clr;
	data |= set;
	sh_msiof_write(p, CTR, data);

	return readl_poll_timeout_atomic(p->mapbase + CTR, data,
					 (data & mask) == set, 10, 1000);
}

static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
{
	struct sh_msiof_spi_priv *p = data;

	/* just disable the interrupt and wake up */
	sh_msiof_write(p, IER, 0);
	complete(&p->done);

	return IRQ_HANDLED;
}

static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
	u32 mask = CTR_TXRST | CTR_RXRST;
	u32 data;

	data = sh_msiof_read(p, CTR);
	data |= mask;
	sh_msiof_write(p, CTR, data);

	readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
				  100);
}

static const u32 sh_msiof_spi_div_array[] = {
	SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
	SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
};

static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
				      unsigned long parent_rate, u32 spi_hz)
{
	unsigned long div;
	u32 brps, scr;
	unsigned int div_pow = p->min_div_pow;

	if (!spi_hz || !parent_rate) {
		WARN(1, "Invalid clock rate parameters %lu and %u\n",
		     parent_rate, spi_hz);
		return;
	}

	div = DIV_ROUND_UP(parent_rate, spi_hz);
	if (div <= 1024) {
		/* SCR_BRDV_DIV_1 is valid only if the prescaler is 1/1 or 1/2 (BRPS = 1 or 2) */
		if (!div_pow && div <= 32 && div > 2)
			div_pow = 1;

		if (div_pow)
			brps = (div + 1) >> div_pow;
		else
			brps = div;

		for (; brps > 32; div_pow++)
			brps = (brps + 1) >> 1;
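		/*
		 * Example (min_div_pow = 0): parent_rate = 66 MHz and
		 * spi_hz = 1 MHz give div = 66, so brps is halved twice
		 * (66 -> 33 -> 17) while div_pow becomes 2, i.e. the bus
		 * runs at 66 MHz / (17 * 2^2) ~= 970 kHz, never faster
		 * than the requested rate.
		 */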
	} else {
		/* Set transfer rate composite divisor to 2^5 * 32 = 1024 */
		dev_err(&p->pdev->dev,
			"Requested SPI transfer rate %d is too low\n", spi_hz);
		div_pow = 5;
		brps = 32;
	}

	scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
	sh_msiof_write(p, TSCR, scr);
	if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
		sh_msiof_write(p, RSCR, scr);
}

static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
{
	/*
	 * DTDL/SYNCDL bit	: p->info->dtdl or p->info->syncdl
	 * b'000		: 0
	 * b'001		: 100
	 * b'010		: 200
	 * b'011 (SYNCDL only)	: 300
	 * b'101		: 50
	 * b'110		: 150
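	 * e.g. dtdl = 150 maps to 150/100 + 5 = 6, i.e. b'110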
	 */
	if (dtdl_or_syncdl % 100)
		return dtdl_or_syncdl / 100 + 5;
	else
		return dtdl_or_syncdl / 100;
}

static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
{
	u32 val;

	if (!p->info)
		return 0;

	/* check if DTDL and SYNCDL are allowed values */
	if (p->info->dtdl > 200 || p->info->syncdl > 300) {
		dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
		return 0;
	}

	/* check if the sum of DTDL and SYNCDL is a multiple of 100 */
	if ((p->info->dtdl + p->info->syncdl) % 100) {
		dev_warn(&p->pdev->dev, "the sum of DTDL and SYNCDL must be a multiple of 100\n");
		return 0;
	}

	val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
	val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;

	return val;
}

static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
				      u32 cpol, u32 cpha,
				      u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
	u32 tmp;
	int edge;

	/*
	 * CPOL CPHA     TSCKIZ RSCKIZ TEDG REDG
	 *    0    0         10     10    1    1
	 *    0    1         10     10    0    0
	 *    1    0         11     11    0    0
	 *    1    1         11     11    1    1
	 */
	tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
	tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
	tmp |= lsb_first << MDR1_BITLSB_SHIFT;
	tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
	if (spi_controller_is_slave(p->ctlr)) {
		sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
	} else {
		sh_msiof_write(p, TMDR1,
			       tmp | MDR1_TRMD | TMDR1_PCON |
			       (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
	}
	if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
		/* These bits are reserved if RX needs TX */
		tmp &= ~0x0000ffff;
	}
	sh_msiof_write(p, RMDR1, tmp);

	tmp = 0;
	tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
	tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;

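	/* TEDG/REDG follow the table above: edge = CPOL XOR !CPHA */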
	edge = cpol ^ !cpha;

	tmp |= edge << CTR_TEDG_SHIFT;
	tmp |= edge << CTR_REDG_SHIFT;
	tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
	sh_msiof_write(p, CTR, tmp);
}

static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
				       const void *tx_buf, void *rx_buf,
				       u32 bits, u32 words)
{
	u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);

	if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
		sh_msiof_write(p, TMDR2, dr2);
	else
		sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);

	if (rx_buf)
		sh_msiof_write(p, RMDR2, dr2);
}

static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
	sh_msiof_write(p, STR,
		       sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
}

static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
				      const void *tx_buf, int words, int fs)
{
	const u8 *buf_8 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, TFDR, buf_8[k] << fs);
}

static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
				       const void *tx_buf, int words, int fs)
{
	const u16 *buf_16 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, TFDR, buf_16[k] << fs);
}

static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
					const void *tx_buf, int words, int fs)
{
	const u16 *buf_16 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
}

static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
				       const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, TFDR, buf_32[k] << fs);
}

static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
					const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
}

static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
					const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
}

static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
					 const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}

static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
				     void *rx_buf, int words, int fs)
{
	u8 *buf_8 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
				      void *rx_buf, int words, int fs)
{
	u16 *buf_16 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
				       void *rx_buf, int words, int fs)
{
	u16 *buf_16 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
}

static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
				      void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
				       void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
}

static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
				       void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
}

static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
					void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
}

static int sh_msiof_spi_setup(struct spi_device *spi)
{
	struct sh_msiof_spi_priv *p =
		spi_controller_get_devdata(spi->controller);
	u32 clr, set, tmp;

	if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr))
		return 0;

	if (p->native_cs_inited &&
	    (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
		return 0;

	/* Configure native chip select mode/polarity early */
	clr = MDR1_SYNCMD_MASK;
	set = MDR1_SYNCMD_SPI;
	if (spi->mode & SPI_CS_HIGH)
		clr |= BIT(MDR1_SYNCAC_SHIFT);
	else
		set |= BIT(MDR1_SYNCAC_SHIFT);
	pm_runtime_get_sync(&p->pdev->dev);
	tmp = sh_msiof_read(p, TMDR1) & ~clr;
	sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
	tmp = sh_msiof_read(p, RMDR1) & ~clr;
	sh_msiof_write(p, RMDR1, tmp | set);
	pm_runtime_put(&p->pdev->dev);
	p->native_cs_high = spi->mode & SPI_CS_HIGH;
	p->native_cs_inited = true;
	return 0;
}

static int sh_msiof_prepare_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
	const struct spi_device *spi = msg->spi;
	u32 ss, cs_high;

	/* Configure pins before asserting CS */
	if (spi->cs_gpiod) {
		ss = p->unused_ss;
		cs_high = p->native_cs_high;
	} else {
		ss = spi->chip_select;
		cs_high = !!(spi->mode & SPI_CS_HIGH);
	}
	sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
				  !!(spi->mode & SPI_CPHA),
				  !!(spi->mode & SPI_3WIRE),
				  !!(spi->mode & SPI_LSB_FIRST), cs_high);
	return 0;
}

static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
	bool slave = spi_controller_is_slave(p->ctlr);
	int ret = 0;

	/* setup clock and rx/tx signals */
	if (!slave)
		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
	if (rx_buf && !ret)
		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
	if (!ret)
		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);

	/* start by setting frame bit */
	if (!ret && !slave)
		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);

	return ret;
}

static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
	bool slave = spi_controller_is_slave(p->ctlr);
	int ret = 0;

	/* shut down frame, rx/tx and clock signals */
	if (!slave)
		ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
	if (!ret)
		ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
	if (rx_buf && !ret)
		ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
	if (!ret && !slave)
		ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);

	return ret;
}

static int sh_msiof_slave_abort(struct spi_controller *ctlr)
{
	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);

	p->slave_aborted = true;
	complete(&p->done);
	complete(&p->done_txdma);
	return 0;
}

static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
					struct completion *x)
{
	if (spi_controller_is_slave(p->ctlr)) {
		if (wait_for_completion_interruptible(x) ||
		    p->slave_aborted) {
			dev_dbg(&p->pdev->dev, "interrupted\n");
			return -EINTR;
		}
	} else {
		if (!wait_for_completion_timeout(x, HZ)) {
			dev_err(&p->pdev->dev, "timeout\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
				  void (*tx_fifo)(struct sh_msiof_spi_priv *,
						  const void *, int, int),
				  void (*rx_fifo)(struct sh_msiof_spi_priv *,
						  void *, int, int),
				  const void *tx_buf, void *rx_buf,
				  int words, int bits)
{
	int fifo_shift;
	int ret;

	/* limit maximum word transfer to rx/tx fifo size */
	if (tx_buf)
		words = min_t(int, words, p->tx_fifo_size);
	if (rx_buf)
		words = min_t(int, words, p->rx_fifo_size);

	/* the fifo contents need shifting */
	fifo_shift = 32 - bits;
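	/* e.g. 8-bit data occupies the top byte of each 32-bit FIFO word */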

	/* default FIFO watermarks for PIO */
	sh_msiof_write(p, FCTR, 0);

	/* setup msiof transfer mode registers */
	sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
	sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);

	/* write tx fifo */
	if (tx_buf)
		tx_fifo(p, tx_buf, words, fifo_shift);

	reinit_completion(&p->done);
	p->slave_aborted = false;

	ret = sh_msiof_spi_start(p, rx_buf);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to start hardware\n");
		goto stop_ier;
	}

	/* wait for tx fifo to be emptied / rx fifo to be filled */
	ret = sh_msiof_wait_for_completion(p, &p->done);
	if (ret)
		goto stop_reset;

	/* read rx fifo */
	if (rx_buf)
		rx_fifo(p, rx_buf, words, fifo_shift);

	/* clear status bits */
	sh_msiof_reset_str(p);

	ret = sh_msiof_spi_stop(p, rx_buf);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to shut down hardware\n");
		return ret;
	}

	return words;

stop_reset:
	sh_msiof_reset_str(p);
	sh_msiof_spi_stop(p, rx_buf);
stop_ier:
	sh_msiof_write(p, IER, 0);
	return ret;
}

static void sh_msiof_dma_complete(void *arg)
{
	complete(arg);
}

static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
			     void *rx, unsigned int len)
{
	u32 ier_bits = 0;
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	dma_cookie_t cookie;
	int ret;

	/* First prepare and submit the DMA request(s), as this may fail */
	if (rx) {
		ier_bits |= IER_RDREQE | IER_RDMAE;
		desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
					p->rx_dma_addr, len, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			return -EAGAIN;

		desc_rx->callback = sh_msiof_dma_complete;
		desc_rx->callback_param = &p->done;
		cookie = dmaengine_submit(desc_rx);
		if (dma_submit_error(cookie))
			return cookie;
	}

	if (tx) {
		ier_bits |= IER_TDREQE | IER_TDMAE;
		dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
					   p->tx_dma_addr, len, DMA_TO_DEVICE);
		desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
					p->tx_dma_addr, len, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx) {
			ret = -EAGAIN;
			goto no_dma_tx;
		}

		desc_tx->callback = sh_msiof_dma_complete;
		desc_tx->callback_param = &p->done_txdma;
		cookie = dmaengine_submit(desc_tx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_tx;
		}
	}

	/* 1 stage FIFO watermarks for DMA */
	sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);

	/* setup msiof transfer mode registers (32-bit words) */
	sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);

	sh_msiof_write(p, IER, ier_bits);

	reinit_completion(&p->done);
	if (tx)
		reinit_completion(&p->done_txdma);
	p->slave_aborted = false;

	/* Now start DMA */
	if (rx)
		dma_async_issue_pending(p->ctlr->dma_rx);
	if (tx)
		dma_async_issue_pending(p->ctlr->dma_tx);

	ret = sh_msiof_spi_start(p, rx);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to start hardware\n");
		goto stop_dma;
	}

	if (tx) {
		/* wait for tx DMA completion */
		ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
		if (ret)
			goto stop_reset;
	}

	if (rx) {
		/* wait for rx DMA completion */
		ret = sh_msiof_wait_for_completion(p, &p->done);
		if (ret)
			goto stop_reset;

		sh_msiof_write(p, IER, 0);
	} else {
		/* wait for tx fifo to be emptied */
		sh_msiof_write(p, IER, IER_TEOFE);
		ret = sh_msiof_wait_for_completion(p, &p->done);
		if (ret)
			goto stop_reset;
	}

	/* clear status bits */
	sh_msiof_reset_str(p);

	ret = sh_msiof_spi_stop(p, rx);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to shut down hardware\n");
		return ret;
	}

	if (rx)
		dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
					p->rx_dma_addr, len, DMA_FROM_DEVICE);

	return 0;

stop_reset:
	sh_msiof_reset_str(p);
	sh_msiof_spi_stop(p, rx);
stop_dma:
	if (tx)
		dmaengine_terminate_all(p->ctlr->dma_tx);
no_dma_tx:
	if (rx)
		dmaengine_terminate_all(p->ctlr->dma_rx);
	sh_msiof_write(p, IER, 0);
	return ret;
}

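/*
 * Byte-swap each 32-bit word for DMA of 8-bit data: on a little-endian CPU,
 * e.g. the byte sequence 0x01 0x02 0x03 0x04 becomes the FIFO word
 * 0x01020304 and is shifted out MSB first, i.e. in the original byte order
 * (consistent with the byte-wide PIO path above).
 */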
static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
{
	/* src or dst can be unaligned, but not both */
	if ((unsigned long)src & 3) {
		while (words--) {
			*dst++ = swab32(get_unaligned(src));
			src++;
		}
	} else if ((unsigned long)dst & 3) {
		while (words--) {
			put_unaligned(swab32(*src++), dst);
			dst++;
		}
	} else {
		while (words--)
			*dst++ = swab32(*src++);
	}
}

static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
{
	/* src or dst can be unaligned, but not both */
	if ((unsigned long)src & 3) {
		while (words--) {
			*dst++ = swahw32(get_unaligned(src));
			src++;
		}
	} else if ((unsigned long)dst & 3) {
		while (words--) {
			put_unaligned(swahw32(*src++), dst);
			dst++;
		}
	} else {
		while (words--)
			*dst++ = swahw32(*src++);
	}
}

static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
{
	memcpy(dst, src, words * 4);
}

static int sh_msiof_transfer_one(struct spi_controller *ctlr,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
	void (*copy32)(u32 *, const u32 *, unsigned int);
	void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
	void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
	const void *tx_buf = t->tx_buf;
	void *rx_buf = t->rx_buf;
	unsigned int len = t->len;
	unsigned int bits = t->bits_per_word;
	unsigned int bytes_per_word;
	unsigned int words;
	int n;
	bool swab;
	int ret;

	/* reset registers */
	sh_msiof_spi_reset_regs(p);

	/* setup clocks (clock already enabled in chipselect()) */
	if (!spi_controller_is_slave(p->ctlr))
		sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);

	while (ctlr->dma_tx && len > 15) {
		/*
		 * DMA supports 32-bit words only, hence pack 8-bit and 16-bit
		 * words, with byte and word swapping respectively.
		 */
		unsigned int l = 0;

		if (tx_buf)
			l = min(round_down(len, 4), p->tx_fifo_size * 4);
		if (rx_buf)
			l = min(round_down(len, 4), p->rx_fifo_size * 4);

		if (bits <= 8) {
			copy32 = copy_bswap32;
		} else if (bits <= 16) {
			copy32 = copy_wswap32;
		} else {
			copy32 = copy_plain32;
		}

		if (tx_buf)
			copy32(p->tx_dma_page, tx_buf, l / 4);

		ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
		if (ret == -EAGAIN) {
			dev_warn_once(&p->pdev->dev,
				"DMA not available, falling back to PIO\n");
			break;
		}
		if (ret)
			return ret;

		if (rx_buf) {
			copy32(rx_buf, p->rx_dma_page, l / 4);
			rx_buf += l;
		}
		if (tx_buf)
			tx_buf += l;

		len -= l;
		if (!len)
			return 0;
	}

	if (bits <= 8 && len > 15) {
		bits = 32;
		swab = true;
	} else {
		swab = false;
	}

	/* setup bytes per word and fifo read/write functions */
	if (bits <= 8) {
		bytes_per_word = 1;
		tx_fifo = sh_msiof_spi_write_fifo_8;
		rx_fifo = sh_msiof_spi_read_fifo_8;
	} else if (bits <= 16) {
		bytes_per_word = 2;
		if ((unsigned long)tx_buf & 0x01)
			tx_fifo = sh_msiof_spi_write_fifo_16u;
		else
			tx_fifo = sh_msiof_spi_write_fifo_16;

		if ((unsigned long)rx_buf & 0x01)
			rx_fifo = sh_msiof_spi_read_fifo_16u;
		else
			rx_fifo = sh_msiof_spi_read_fifo_16;
	} else if (swab) {
		bytes_per_word = 4;
		if ((unsigned long)tx_buf & 0x03)
			tx_fifo = sh_msiof_spi_write_fifo_s32u;
		else
			tx_fifo = sh_msiof_spi_write_fifo_s32;

		if ((unsigned long)rx_buf & 0x03)
			rx_fifo = sh_msiof_spi_read_fifo_s32u;
		else
			rx_fifo = sh_msiof_spi_read_fifo_s32;
	} else {
		bytes_per_word = 4;
		if ((unsigned long)tx_buf & 0x03)
			tx_fifo = sh_msiof_spi_write_fifo_32u;
		else
			tx_fifo = sh_msiof_spi_write_fifo_32;

		if ((unsigned long)rx_buf & 0x03)
			rx_fifo = sh_msiof_spi_read_fifo_32u;
		else
			rx_fifo = sh_msiof_spi_read_fifo_32;
	}

	/* transfer in fifo sized chunks */
	words = len / bytes_per_word;

	while (words > 0) {
		n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
					   words, bits);
		if (n < 0)
			return n;

		if (tx_buf)
			tx_buf += n * bytes_per_word;
		if (rx_buf)
			rx_buf += n * bytes_per_word;
		words -= n;

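		/* send any trailing partial word one byte at a time */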
		if (words == 0 && (len % bytes_per_word)) {
			words = len % bytes_per_word;
			bits = t->bits_per_word;
			bytes_per_word = 1;
			tx_fifo = sh_msiof_spi_write_fifo_8;
			rx_fifo = sh_msiof_spi_read_fifo_8;
		}
	}

	return 0;
}

static const struct sh_msiof_chipdata sh_data = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
	.tx_fifo_size = 64,
	.rx_fifo_size = 64,
	.ctlr_flags = 0,
	.min_div_pow = 0,
};

static const struct sh_msiof_chipdata rcar_gen2_data = {
	.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
			      SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
	.tx_fifo_size = 64,
	.rx_fifo_size = 64,
	.ctlr_flags = SPI_CONTROLLER_MUST_TX,
	.min_div_pow = 0,
};

static const struct sh_msiof_chipdata rcar_gen3_data = {
	.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
			      SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
	.tx_fifo_size = 64,
	.rx_fifo_size = 64,
	.ctlr_flags = SPI_CONTROLLER_MUST_TX,
	.min_div_pow = 1,
};

static const struct of_device_id sh_msiof_match[] = {
	{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
	{ .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
	{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
	{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
	{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
	{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);

#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
	struct sh_msiof_spi_info *info;
	struct device_node *np = dev->of_node;
	u32 num_cs = 1;

	info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
	if (!info)
		return NULL;

	info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
							    : MSIOF_SPI_MASTER;

	/* Parse the MSIOF properties */
	if (info->mode == MSIOF_SPI_MASTER)
		of_property_read_u32(np, "num-cs", &num_cs);
	of_property_read_u32(np, "renesas,tx-fifo-size",
			     &info->tx_fifo_override);
	of_property_read_u32(np, "renesas,rx-fifo-size",
			     &info->rx_fifo_override);
	of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
	of_property_read_u32(np, "renesas,syncdl", &info->syncdl);

	info->num_chipselect = num_cs;

	return info;
}
#else
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
	return NULL;
}
#endif

static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
{
	struct device *dev = &p->pdev->dev;
	unsigned int used_ss_mask = 0;
	unsigned int cs_gpios = 0;
	unsigned int num_cs, i;
	int ret;

	ret = gpiod_count(dev, "cs");
	if (ret <= 0)
		return 0;

	num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
	for (i = 0; i < num_cs; i++) {
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
		if (!IS_ERR(gpiod)) {
			devm_gpiod_put(dev, gpiod);
			cs_gpios++;
			continue;
		}

		if (PTR_ERR(gpiod) != -ENOENT)
			return PTR_ERR(gpiod);

		if (i >= MAX_SS) {
			dev_err(dev, "Invalid native chip select %d\n", i);
			return -EINVAL;
		}
		used_ss_mask |= BIT(i);
	}
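	/*
	 * Pick a native SS that no device is wired to; prepare_message()
	 * points SYNCCH at it while a GPIO chip select is in use.
	 */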
1159 p->unused_ss = ffz(used_ss_mask);
1160 if (cs_gpios && p->unused_ss >= MAX_SS) {
1161 dev_err(dev, "No unused native chip select available\n");
1162 return -EINVAL;
1163 }
1164 return 0;
1165}
1166
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001167static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
1168 enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
1169{
1170 dma_cap_mask_t mask;
1171 struct dma_chan *chan;
1172 struct dma_slave_config cfg;
1173 int ret;
1174
1175 dma_cap_zero(mask);
1176 dma_cap_set(DMA_SLAVE, mask);
1177
Geert Uytterhoevena6be4de2014-08-06 14:59:05 +02001178 chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
1179 (void *)(unsigned long)id, dev,
1180 dir == DMA_MEM_TO_DEV ? "tx" : "rx");
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001181 if (!chan) {
Geert Uytterhoevena6be4de2014-08-06 14:59:05 +02001182 dev_warn(dev, "dma_request_slave_channel_compat failed\n");
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001183 return NULL;
1184 }
1185
1186 memset(&cfg, 0, sizeof(cfg));
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001187 cfg.direction = dir;
Geert Uytterhoeven52fba2b2014-08-06 14:59:04 +02001188 if (dir == DMA_MEM_TO_DEV) {
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001189 cfg.dst_addr = port_addr;
Geert Uytterhoeven52fba2b2014-08-06 14:59:04 +02001190 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1191 } else {
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001192 cfg.src_addr = port_addr;
Geert Uytterhoeven52fba2b2014-08-06 14:59:04 +02001193 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1194 }
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001195
1196 ret = dmaengine_slave_config(chan, &cfg);
1197 if (ret) {
1198 dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1199 dma_release_channel(chan);
1200 return NULL;
1201 }
1202
1203 return chan;
1204}
1205
1206static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
1207{
1208 struct platform_device *pdev = p->pdev;
1209 struct device *dev = &pdev->dev;
Hoan Nguyen Anf70351a2019-01-18 18:29:30 +09001210 const struct sh_msiof_spi_info *info = p->info;
Geert Uytterhoevena6be4de2014-08-06 14:59:05 +02001211 unsigned int dma_tx_id, dma_rx_id;
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001212 const struct resource *res;
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001213 struct spi_controller *ctlr;
Geert Uytterhoeven5dabcf22014-07-11 17:56:22 +02001214 struct device *tx_dev, *rx_dev;
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001215
Geert Uytterhoevena6be4de2014-08-06 14:59:05 +02001216 if (dev->of_node) {
1217 /* In the OF case we will get the slave IDs from the DT */
1218 dma_tx_id = 0;
1219 dma_rx_id = 0;
1220 } else if (info && info->dma_tx_id && info->dma_rx_id) {
1221 dma_tx_id = info->dma_tx_id;
1222 dma_rx_id = info->dma_rx_id;
1223 } else {
1224 /* The driver assumes no error */
1225 return 0;
1226 }
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001227
1228 /* The DMA engine uses the second register set, if present */
1229 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1230 if (!res)
1231 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1232
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001233 ctlr = p->ctlr;
1234 ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
1235 dma_tx_id, res->start + TFDR);
1236 if (!ctlr->dma_tx)
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001237 return -ENODEV;
1238
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001239 ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
1240 dma_rx_id, res->start + RFDR);
1241 if (!ctlr->dma_rx)
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001242 goto free_tx_chan;
1243
1244 p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1245 if (!p->tx_dma_page)
1246 goto free_rx_chan;
1247
1248 p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
1249 if (!p->rx_dma_page)
1250 goto free_tx_page;
1251
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001252 tx_dev = ctlr->dma_tx->device->dev;
Geert Uytterhoeven5dabcf22014-07-11 17:56:22 +02001253 p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001254 DMA_TO_DEVICE);
Geert Uytterhoeven5dabcf22014-07-11 17:56:22 +02001255 if (dma_mapping_error(tx_dev, p->tx_dma_addr))
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001256 goto free_rx_page;
1257
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001258 rx_dev = ctlr->dma_rx->device->dev;
Geert Uytterhoeven5dabcf22014-07-11 17:56:22 +02001259 p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001260 DMA_FROM_DEVICE);
Geert Uytterhoeven5dabcf22014-07-11 17:56:22 +02001261 if (dma_mapping_error(rx_dev, p->rx_dma_addr))
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001262 goto unmap_tx_page;
1263
1264 dev_info(dev, "DMA available\n");
1265 return 0;
1266
1267unmap_tx_page:
Geert Uytterhoeven5dabcf22014-07-11 17:56:22 +02001268 dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001269free_rx_page:
1270 free_page((unsigned long)p->rx_dma_page);
1271free_tx_page:
1272 free_page((unsigned long)p->tx_dma_page);
1273free_rx_chan:
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001274 dma_release_channel(ctlr->dma_rx);
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001275free_tx_chan:
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001276 dma_release_channel(ctlr->dma_tx);
1277 ctlr->dma_tx = NULL;
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001278 return -ENODEV;
1279}
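/*
 * A minimal sketch (assuming len <= PAGE_SIZE) of how the bounce pages
 * mapped above are used around a single DMA transfer elsewhere in this
 * driver:
 *
 *	memcpy(p->tx_dma_page, tx_buf, len);
 *	dma_sync_single_for_device(tx_dev, p->tx_dma_addr, len,
 *				   DMA_TO_DEVICE);
 *	(start DMA, wait for completion)
 *	dma_sync_single_for_cpu(rx_dev, p->rx_dma_addr, len,
 *				DMA_FROM_DEVICE);
 *	memcpy(rx_buf, p->rx_dma_page, len);
 */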
1280
1281static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
1282{
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001283 struct spi_controller *ctlr = p->ctlr;
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001284
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001285 if (!ctlr->dma_tx)
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001286 return;
1287
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001288 dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
1289 DMA_FROM_DEVICE);
1290 dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
1291 DMA_TO_DEVICE);
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001292 free_page((unsigned long)p->rx_dma_page);
1293 free_page((unsigned long)p->tx_dma_page);
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001294 dma_release_channel(ctlr->dma_rx);
1295 dma_release_channel(ctlr->dma_tx);
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001296}
1297
Magnus Damm8051eff2009-11-26 11:10:05 +00001298static int sh_msiof_spi_probe(struct platform_device *pdev)
1299{
1300 struct resource *r;
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001301 struct spi_controller *ctlr;
Geert Uytterhoevena6802cc2016-06-22 14:50:03 +02001302 const struct sh_msiof_chipdata *chipdata;
Hisashi Nakamuracf9e4782017-05-22 15:11:43 +02001303 struct sh_msiof_spi_info *info;
Magnus Damm8051eff2009-11-26 11:10:05 +00001304 struct sh_msiof_spi_priv *p;
Magnus Damm8051eff2009-11-26 11:10:05 +00001305 int i;
1306 int ret;
1307
Geert Uytterhoevenecb15962017-10-04 14:20:27 +02001308 chipdata = of_device_get_match_data(&pdev->dev);
1309 if (chipdata) {
Hisashi Nakamuracf9e4782017-05-22 15:11:43 +02001310 info = sh_msiof_spi_parse_dt(&pdev->dev);
1311 } else {
1312 chipdata = (const void *)pdev->id_entry->driver_data;
1313 info = dev_get_platdata(&pdev->dev);
1314 }
1315
1316 if (!info) {
1317 dev_err(&pdev->dev, "failed to obtain device info\n");
1318 return -ENXIO;
1319 }
1320
1321 if (info->mode == MSIOF_SPI_SLAVE)
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001322 ctlr = spi_alloc_slave(&pdev->dev,
1323 sizeof(struct sh_msiof_spi_priv));
Hisashi Nakamuracf9e4782017-05-22 15:11:43 +02001324 else
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001325 ctlr = spi_alloc_master(&pdev->dev,
1326 sizeof(struct sh_msiof_spi_priv));
1327 if (ctlr == NULL)
Laurent Pinchartb4dd05de32013-11-28 02:39:42 +01001328 return -ENOMEM;
Magnus Damm8051eff2009-11-26 11:10:05 +00001329
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001330 p = spi_controller_get_devdata(ctlr);
Magnus Damm8051eff2009-11-26 11:10:05 +00001331
1332 platform_set_drvdata(pdev, p);
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001333 p->ctlr = ctlr;
Hisashi Nakamuracf9e4782017-05-22 15:11:43 +02001334 p->info = info;
Vladimir Zapolskiy51093cb2018-04-13 15:44:17 +03001335 p->min_div_pow = chipdata->min_div_pow;
Bastian Hechtcf9c86e2012-12-12 12:54:48 +01001336
Magnus Damm8051eff2009-11-26 11:10:05 +00001337 init_completion(&p->done);
Geert Uytterhoeven08ba7ae2018-06-13 10:41:15 +02001338 init_completion(&p->done_txdma);
Magnus Damm8051eff2009-11-26 11:10:05 +00001339
Laurent Pinchartb4dd05de32013-11-28 02:39:42 +01001340 p->clk = devm_clk_get(&pdev->dev, NULL);
Magnus Damm8051eff2009-11-26 11:10:05 +00001341 if (IS_ERR(p->clk)) {
Bastian Hecht078b6ea2012-11-07 12:40:04 +01001342 dev_err(&pdev->dev, "cannot get clock\n");
Magnus Damm8051eff2009-11-26 11:10:05 +00001343 ret = PTR_ERR(p->clk);
1344 goto err1;
1345 }
1346
Magnus Damm8051eff2009-11-26 11:10:05 +00001347 i = platform_get_irq(pdev, 0);
Laurent Pinchartb4dd05de32013-11-28 02:39:42 +01001348 if (i < 0) {
Sergei Shtylyovf34c6e62018-10-12 22:48:22 +03001349 dev_err(&pdev->dev, "cannot get IRQ\n");
1350 ret = i;
Laurent Pinchartb4dd05de32013-11-28 02:39:42 +01001351 goto err1;
Magnus Damm8051eff2009-11-26 11:10:05 +00001352 }
1353
Laurent Pinchartb4dd05de32013-11-28 02:39:42 +01001354 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1355 p->mapbase = devm_ioremap_resource(&pdev->dev, r);
1356 if (IS_ERR(p->mapbase)) {
1357 ret = PTR_ERR(p->mapbase);
1358 goto err1;
1359 }
1360
1361 ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
1362 dev_name(&pdev->dev), p);
Magnus Damm8051eff2009-11-26 11:10:05 +00001363 if (ret) {
1364 dev_err(&pdev->dev, "unable to request irq\n");
Laurent Pinchartb4dd05de32013-11-28 02:39:42 +01001365 goto err1;
Magnus Damm8051eff2009-11-26 11:10:05 +00001366 }
1367
1368 p->pdev = pdev;
1369 pm_runtime_enable(&pdev->dev);
1370
Magnus Damm8051eff2009-11-26 11:10:05 +00001371 /* Platform data may override FIFO sizes */
Geert Uytterhoevena6802cc2016-06-22 14:50:03 +02001372 p->tx_fifo_size = chipdata->tx_fifo_size;
1373 p->rx_fifo_size = chipdata->rx_fifo_size;
Magnus Damm8051eff2009-11-26 11:10:05 +00001374 if (p->info->tx_fifo_override)
1375 p->tx_fifo_size = p->info->tx_fifo_override;
1376 if (p->info->rx_fifo_override)
1377 p->rx_fifo_size = p->info->rx_fifo_override;
1378
Geert Uytterhoevenb8761432017-12-13 20:05:12 +01001379 /* Set up GPIO chip selects */
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001380 ctlr->num_chipselect = p->info->num_chipselect;
Geert Uytterhoevenb8761432017-12-13 20:05:12 +01001381 ret = sh_msiof_get_cs_gpios(p);
1382 if (ret)
1383 goto err1;
1384
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001385 /* Initialize the SPI controller */
1386 ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1387 ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
1388 ctlr->flags = chipdata->ctlr_flags;
1389 ctlr->bus_num = pdev->id;
1390 ctlr->dev.of_node = pdev->dev.of_node;
1391 ctlr->setup = sh_msiof_spi_setup;
1392 ctlr->prepare_message = sh_msiof_prepare_message;
1393 ctlr->slave_abort = sh_msiof_slave_abort;
Geert Uytterhoeven0e836c32019-02-28 12:05:13 +01001394 ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001395 ctlr->auto_runtime_pm = true;
1396 ctlr->transfer_one = sh_msiof_transfer_one;
Geert Uytterhoeven9fda6692019-04-03 17:08:52 +02001397 ctlr->use_gpio_descriptors = true;
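	/*
	 * For illustration (hypothetical board code, not part of this
	 * driver): a client relying on the mode bits advertised above
	 * could be registered early at boot as:
	 *
	 *	static const struct spi_board_info msiof_client = {
	 *		.modalias = "spidev",
	 *		.max_speed_hz = 10000000,
	 *		.bus_num = 0,
	 *		.chip_select = 0,
	 *		.mode = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST,
	 *	};
	 *	spi_register_board_info(&msiof_client, 1);
	 */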
Magnus Damm8051eff2009-11-26 11:10:05 +00001398
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001399 ret = sh_msiof_request_dma(p);
1400 if (ret < 0)
1401 dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1402
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001403 ret = devm_spi_register_controller(&pdev->dev, ctlr);
Geert Uytterhoeven1bd6363bc02014-02-25 11:21:13 +01001404 if (ret < 0) {
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001405 dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
Geert Uytterhoeven1bd6363bc02014-02-25 11:21:13 +01001406 goto err2;
1407 }
Magnus Damm8051eff2009-11-26 11:10:05 +00001408
Geert Uytterhoeven1bd6363bc02014-02-25 11:21:13 +01001409 return 0;
Magnus Damm8051eff2009-11-26 11:10:05 +00001410
Geert Uytterhoeven1bd6363bc02014-02-25 11:21:13 +01001411 err2:
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001412 sh_msiof_release_dma(p);
Magnus Damm8051eff2009-11-26 11:10:05 +00001413 pm_runtime_disable(&pdev->dev);
Magnus Damm8051eff2009-11-26 11:10:05 +00001414 err1:
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001415 spi_controller_put(ctlr);
Magnus Damm8051eff2009-11-26 11:10:05 +00001416 return ret;
1417}
1418
1419static int sh_msiof_spi_remove(struct platform_device *pdev)
1420{
Geert Uytterhoevenb0d0ce82014-06-30 12:10:24 +02001421 struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1422
1423 sh_msiof_release_dma(p);
Geert Uytterhoeven1bd6363bc02014-02-25 11:21:13 +01001424 pm_runtime_disable(&pdev->dev);
Geert Uytterhoeven1bd6363bc02014-02-25 11:21:13 +01001425 return 0;
Magnus Damm8051eff2009-11-26 11:10:05 +00001426}
1427
Krzysztof Kozlowski3789c85202015-05-02 00:44:07 +09001428static const struct platform_device_id spi_driver_ids[] = {
Geert Uytterhoeven50a7e232014-02-25 11:21:09 +01001429 { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
Bastian Hechtcf9c86e2012-12-12 12:54:48 +01001430 {},
1431};
Geert Uytterhoeven50a7e232014-02-25 11:21:09 +01001432MODULE_DEVICE_TABLE(platform, spi_driver_ids);
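/*
 * Hypothetical example (not in this file) of board code instantiating
 * the "spi_sh_msiof" platform id above:
 *
 *	pdev = platform_device_register_resndata(NULL, "spi_sh_msiof", 0,
 *						 msiof_resources,
 *						 ARRAY_SIZE(msiof_resources),
 *						 &msiof0_info,
 *						 sizeof(msiof0_info));
 */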
Bastian Hechtcf9c86e2012-12-12 12:54:48 +01001433
Gaku Inamiffa69d62018-09-05 10:49:36 +02001434#ifdef CONFIG_PM_SLEEP
1435static int sh_msiof_spi_suspend(struct device *dev)
1436{
Wolfram Sang07c7df32018-10-21 22:00:46 +02001437 struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
Gaku Inamiffa69d62018-09-05 10:49:36 +02001438
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001439 return spi_controller_suspend(p->ctlr);
Gaku Inamiffa69d62018-09-05 10:49:36 +02001440}
1441
1442static int sh_msiof_spi_resume(struct device *dev)
1443{
Wolfram Sang07c7df32018-10-21 22:00:46 +02001444 struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
Gaku Inamiffa69d62018-09-05 10:49:36 +02001445
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001446 return spi_controller_resume(p->ctlr);
Gaku Inamiffa69d62018-09-05 10:49:36 +02001447}
1448
1449static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
1450 sh_msiof_spi_resume);
1451#define DEV_PM_OPS &sh_msiof_spi_pm_ops
1452#else
1453#define DEV_PM_OPS NULL
1454#endif /* CONFIG_PM_SLEEP */
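/*
 * Simplified sketch of what SIMPLE_DEV_PM_OPS above expands to when
 * CONFIG_PM_SLEEP is enabled; the same pair of callbacks also backs
 * hibernation (freeze/thaw, poweroff/restore):
 *
 *	static const struct dev_pm_ops sh_msiof_spi_pm_ops = {
 *		.suspend = sh_msiof_spi_suspend,
 *		.resume = sh_msiof_spi_resume,
 *		.freeze = sh_msiof_spi_suspend,
 *		.thaw = sh_msiof_spi_resume,
 *		.poweroff = sh_msiof_spi_suspend,
 *		.restore = sh_msiof_spi_resume,
 *	};
 */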
1455
Magnus Damm8051eff2009-11-26 11:10:05 +00001456static struct platform_driver sh_msiof_spi_drv = {
1457 .probe = sh_msiof_spi_probe,
1458 .remove = sh_msiof_spi_remove,
Geert Uytterhoeven50a7e232014-02-25 11:21:09 +01001459 .id_table = spi_driver_ids,
Magnus Damm8051eff2009-11-26 11:10:05 +00001460 .driver = {
1461 .name = "spi_sh_msiof",
Gaku Inamiffa69d62018-09-05 10:49:36 +02001462 .pm = DEV_PM_OPS,
Sachin Kamat691ee4e2013-03-14 15:31:51 +05301463 .of_match_table = of_match_ptr(sh_msiof_match),
Magnus Damm8051eff2009-11-26 11:10:05 +00001464 },
1465};
Grant Likely940ab882011-10-05 11:29:49 -06001466module_platform_driver(sh_msiof_spi_drv);
Magnus Damm8051eff2009-11-26 11:10:05 +00001467
Geert Uytterhoeven35c35fd2019-02-08 10:09:09 +01001468MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
Magnus Damm8051eff2009-11-26 11:10:05 +00001469MODULE_AUTHOR("Magnus Damm");
1470MODULE_LICENSE("GPL v2");
1471MODULE_ALIAS("platform:spi_sh_msiof");