// SPDX-License-Identifier: GPL-2.0
/*
 * SPI bus driver for the Ingenic JZ47xx SoCs
 * Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
 * Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

#define REG_SSIDR	0x0
#define REG_SSICR0	0x4
#define REG_SSICR1	0x8
#define REG_SSISR	0xc
#define REG_SSIGR	0x18

#define REG_SSICR0_TENDIAN_LSB		BIT(19)
#define REG_SSICR0_RENDIAN_LSB		BIT(17)
#define REG_SSICR0_SSIE			BIT(15)
#define REG_SSICR0_LOOP			BIT(10)
#define REG_SSICR0_EACLRUN		BIT(7)
#define REG_SSICR0_FSEL			BIT(6)
#define REG_SSICR0_TFLUSH		BIT(2)
#define REG_SSICR0_RFLUSH		BIT(1)

#define REG_SSICR1_FRMHL_MASK		(BIT(31) | BIT(30))
#define REG_SSICR1_FRMHL		BIT(30)
#define REG_SSICR1_LFST			BIT(25)
#define REG_SSICR1_UNFIN		BIT(23)
#define REG_SSICR1_PHA			BIT(1)
#define REG_SSICR1_POL			BIT(0)

#define REG_SSISR_END			BIT(7)
#define REG_SSISR_BUSY			BIT(6)
#define REG_SSISR_TFF			BIT(5)
#define REG_SSISR_RFE			BIT(4)
#define REG_SSISR_RFHF			BIT(2)
#define REG_SSISR_UNDR			BIT(1)
#define REG_SSISR_OVER			BIT(0)

#define SPI_INGENIC_FIFO_SIZE		128u

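/*
 * Per-SoC parameters: which word sizes the controller accepts, where the
 * FLEN (word length) field sits in SSICR1, and whether the controller has
 * separate TX/RX endianness control bits in SSICR0.
 */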
struct jz_soc_info {
	u32 bits_per_word_mask;
	struct reg_field flen_field;
	bool has_trendian;
};

struct ingenic_spi {
	const struct jz_soc_info *soc_info;
	struct clk *clk;
	struct resource *mem_res;

	struct regmap *map;
	struct regmap_field *flen_field;
};

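/* Poll SSISR until the bits in @mask match @condition, or time out. */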
static int spi_ingenic_wait(struct ingenic_spi *priv,
			    unsigned long mask,
			    bool condition)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
					!!(val & mask) == condition,
					100, 10000);
}

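/*
 * ->set_cs() callback.  When deasserting the chip select, clear UNFIN,
 * acknowledge any underrun/overrun status and wait for the END bit; when
 * asserting it, set UNFIN so the controller treats the transfer as still in
 * progress.  Both paths flush the TX and RX FIFOs.
 */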
static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);

	if (disable) {
		regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
		regmap_clear_bits(priv->map, REG_SSISR,
				  REG_SSISR_UNDR | REG_SSISR_OVER);

		spi_ingenic_wait(priv, REG_SSISR_END, true);
	} else {
		regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
	}

	regmap_set_bits(priv->map, REG_SSICR0,
			REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
}

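/*
 * Program the bit clock divider and frame length for one transfer: SSIGR is
 * written with (clk_rate / (2 * speed_hz)), clamped to the 1..256 divider
 * range and minus one, and the FLEN field with (bits_per_word - 2).
 */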
static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
					 struct spi_device *spi,
					 struct spi_transfer *xfer)
{
	unsigned long clk_hz = clk_get_rate(priv->clk);
	u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
	    bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;

	cdiv = clk_hz / (speed_hz * 2);
	cdiv = clamp(cdiv, 1u, 0x100u) - 1;

	regmap_write(priv->map, REG_SSIGR, cdiv);

	regmap_field_write(priv->flen_field, bits_per_word - 2);
}

static void spi_ingenic_finalize_transfer(void *controller)
{
	spi_finalize_current_transfer(controller);
}

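/*
 * Build a DMA descriptor for one direction of a transfer.  The slave config
 * width and burst size are derived from the word size, and the RX descriptor
 * gets a completion callback so the SPI core is notified once the read side,
 * and therefore the whole full-duplex transfer, has finished.
 */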
static struct dma_async_tx_descriptor *
spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
			struct sg_table *sg, enum dma_transfer_direction dir,
			unsigned int bits)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct dma_slave_config cfg = {
		.direction = dir,
		.src_addr = priv->mem_res->start + REG_SSIDR,
		.dst_addr = priv->mem_res->start + REG_SSIDR,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (bits > 16) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 4;
	} else if (bits > 8) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 2;
	} else {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.src_maxburst = cfg.dst_maxburst = 1;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ERR_PTR(ret);

	desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	if (dir == DMA_DEV_TO_MEM) {
		desc->callback = spi_ingenic_finalize_transfer;
		desc->callback_param = ctlr;
	}

	cookie = dmaengine_submit(desc);

	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_desc_free(desc);
		return ERR_PTR(ret);
	}

	return desc;
}

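/*
 * Full-duplex DMA transfer: the RX descriptor is prepared first so that its
 * completion callback finalizes the transfer.  Returning 1 tells the SPI core
 * that the transfer is in flight and will be finalized asynchronously.
 */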
static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
			      struct spi_transfer *xfer, unsigned int bits)
{
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;

	rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
					  &xfer->rx_sg, DMA_DEV_TO_MEM, bits);
	if (IS_ERR(rx_desc))
		return PTR_ERR(rx_desc);

	tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
					  &xfer->tx_sg, DMA_MEM_TO_DEV, bits);
	if (IS_ERR(tx_desc)) {
		dmaengine_terminate_async(ctlr->dma_rx);
		dmaengine_desc_free(rx_desc);
		return PTR_ERR(tx_desc);
	}

	dma_async_issue_pending(ctlr->dma_rx);
	dma_async_issue_pending(ctlr->dma_tx);

	return 1;
}

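/*
 * PIO path, expanded below for 8-, 16- and 32-bit words: prefill the TX FIFO
 * (up to its 128-entry depth), then read back one word for every word written
 * until the transfer is complete.
 */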
#define SPI_INGENIC_TX(x)						\
static int spi_ingenic_tx##x(struct ingenic_spi *priv,			\
			     struct spi_transfer *xfer)			\
{									\
	unsigned int count = xfer->len / (x / 8);			\
	unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE);	\
	const u##x *tx_buf = xfer->tx_buf;				\
	u##x *rx_buf = xfer->rx_buf;					\
	unsigned int i, val;						\
	int err;							\
									\
	/* Fill up the TX fifo */					\
	for (i = 0; i < prefill; i++) {					\
		val = tx_buf ? tx_buf[i] : 0;				\
									\
		regmap_write(priv->map, REG_SSIDR, val);		\
	}								\
									\
	for (i = 0; i < count; i++) {					\
		err = spi_ingenic_wait(priv, REG_SSISR_RFE, false);	\
		if (err)						\
			return err;					\
									\
		regmap_read(priv->map, REG_SSIDR, &val);		\
		if (rx_buf)						\
			rx_buf[i] = val;				\
									\
		if (i < count - prefill) {				\
			val = tx_buf ? tx_buf[i + prefill] : 0;		\
									\
			regmap_write(priv->map, REG_SSIDR, val);	\
		}							\
	}								\
									\
	return 0;							\
}
SPI_INGENIC_TX(8)
SPI_INGENIC_TX(16)
SPI_INGENIC_TX(32)
#undef SPI_INGENIC_TX

static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
	bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);

	spi_ingenic_prepare_transfer(priv, spi, xfer);

	if (ctlr->cur_msg_mapped && can_dma)
		return spi_ingenic_dma_tx(ctlr, xfer, bits);

	if (bits > 16)
		return spi_ingenic_tx32(priv, xfer);

	if (bits > 8)
		return spi_ingenic_tx16(priv, xfer);

	return spi_ingenic_tx8(priv, xfer);
}

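/*
 * Apply the per-message mode bits: loopback, chip-select selection and
 * polarity, CPOL/CPHA and bit order.  SoCs with separate TX/RX endianness
 * control use the TENDIAN/RENDIAN bits in SSICR0 for LSB-first mode; older
 * ones use the LFST bit in SSICR1.
 */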
static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *message)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	unsigned int cs = REG_SSICR1_FRMHL << spi->chip_select;
	unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
	unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
	unsigned int ssicr0 = 0, ssicr1 = 0;

	if (priv->soc_info->has_trendian) {
		ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
	} else {
		ssicr1_mask |= REG_SSICR1_LFST;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr1 |= REG_SSICR1_LFST;
	}

	if (spi->mode & SPI_LOOP)
		ssicr0 |= REG_SSICR0_LOOP;
	if (spi->chip_select)
		ssicr0 |= REG_SSICR0_FSEL;

	if (spi->mode & SPI_CPHA)
		ssicr1 |= REG_SSICR1_PHA;
	if (spi->mode & SPI_CPOL)
		ssicr1 |= REG_SSICR1_POL;
	if (spi->mode & SPI_CS_HIGH)
		ssicr1 |= cs;

	regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
	regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);

	return 0;
}

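/*
 * Enable the clock and bring the controller into a known state: SSICR0 is
 * written with only EACLRUN set, SSICR1 and the status register are cleared,
 * and the SSI block is then enabled.
 */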
static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
	regmap_write(priv->map, REG_SSICR1, 0);
	regmap_write(priv->map, REG_SSISR, 0);
	regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	return 0;
}

static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);

	regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	clk_disable_unprepare(priv->clk);

	return 0;
}

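/*
 * DMA is only used when the DMA engine reports no scatter-gather burst limit,
 * or when the transfer fits within max_sg_burst FIFO-sized chunks; anything
 * larger falls back to PIO.
 */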
static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
	if (ret) {
		dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
		return false;
	}

	return !caps.max_sg_burst ||
		xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
}

static int spi_ingenic_request_dma(struct spi_controller *ctlr,
				   struct device *dev)
{
	ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!ctlr->dma_tx)
		return -ENODEV;

	ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!ctlr->dma_rx)
		return -ENODEV;

	ctlr->can_dma = spi_ingenic_can_dma;

	return 0;
}

static void spi_ingenic_release_dma(void *data)
{
	struct spi_controller *ctlr = data;

	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
}

static const struct regmap_config spi_ingenic_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = REG_SSIGR,
};

static int spi_ingenic_probe(struct platform_device *pdev)
{
	const struct jz_soc_info *pdata;
	struct device *dev = &pdev->dev;
	struct spi_controller *ctlr;
	struct ingenic_spi *priv;
	void __iomem *base;
	int ret;

	pdata = of_device_get_match_data(dev);
	if (!pdata) {
		dev_err(dev, "Missing platform data.\n");
		return -EINVAL;
	}

	ctlr = devm_spi_alloc_master(dev, sizeof(*priv));
	if (!ctlr) {
		dev_err(dev, "Unable to allocate SPI controller.\n");
		return -ENOMEM;
	}

	priv = spi_controller_get_devdata(ctlr);
	priv->soc_info = pdata;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "Unable to get clock.\n");
	}

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
	if (IS_ERR(priv->map))
		return PTR_ERR(priv->map);

	priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
						   pdata->flen_field);
	if (IS_ERR(priv->flen_field))
		return PTR_ERR(priv->flen_field);

	platform_set_drvdata(pdev, ctlr);

	ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
	ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
	ctlr->prepare_message = spi_ingenic_prepare_message;
	ctlr->set_cs = spi_ingenic_set_cs;
	ctlr->transfer_one = spi_ingenic_transfer_one;
	ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
	ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
	ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
	ctlr->min_speed_hz = 7200;
	ctlr->max_speed_hz = 54000000;
	ctlr->num_chipselect = 2;
	ctlr->dev.of_node = pdev->dev.of_node;

	if (spi_ingenic_request_dma(ctlr, dev))
		dev_warn(dev, "DMA not available.\n");

	ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
	if (ret) {
		dev_err(dev, "Unable to add action.\n");
		return ret;
	}

	ret = devm_spi_register_controller(dev, ctlr);
	if (ret)
		dev_err(dev, "Unable to register SPI controller.\n");

	return ret;
}

static const struct jz_soc_info jz4750_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
	.flen_field = REG_FIELD(REG_SSICR1, 4, 7),
	.has_trendian = false,
};

static const struct jz_soc_info jz4780_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,
};

static const struct of_device_id spi_ingenic_of_match[] = {
	{ .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
	{ .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
	{}
};
MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);

static struct platform_driver spi_ingenic_driver = {
	.driver = {
		.name = "spi-ingenic",
		.of_match_table = spi_ingenic_of_match,
	},
	.probe = spi_ingenic_probe,
};

module_platform_driver(spi_ingenic_driver);
MODULE_DESCRIPTION("SPI bus driver for the Ingenic JZ47xx SoCs");
MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_LICENSE("GPL");