// SPDX-License-Identifier: GPL-2.0
/*
 * External DMA controller driver for UniPhier SoCs
 * Copyright 2019 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define XDMAC_CH_WIDTH		0x100

#define XDMAC_TFA		0x08
#define XDMAC_TFA_MCNT_MASK	GENMASK(23, 16)
#define XDMAC_TFA_MASK		GENMASK(5, 0)
#define XDMAC_SADM		0x10
#define XDMAC_SADM_STW_MASK	GENMASK(25, 24)
#define XDMAC_SADM_SAM		BIT(4)
#define XDMAC_SADM_SAM_FIXED	XDMAC_SADM_SAM
#define XDMAC_SADM_SAM_INC	0
#define XDMAC_DADM		0x14
#define XDMAC_DADM_DTW_MASK	XDMAC_SADM_STW_MASK
#define XDMAC_DADM_DAM		XDMAC_SADM_SAM
#define XDMAC_DADM_DAM_FIXED	XDMAC_SADM_SAM_FIXED
#define XDMAC_DADM_DAM_INC	XDMAC_SADM_SAM_INC
#define XDMAC_EXSAD		0x18
#define XDMAC_EXDAD		0x1c
#define XDMAC_SAD		0x20
#define XDMAC_DAD		0x24
#define XDMAC_ITS		0x28
#define XDMAC_ITS_MASK		GENMASK(25, 0)
#define XDMAC_TNUM		0x2c
#define XDMAC_TNUM_MASK		GENMASK(15, 0)
#define XDMAC_TSS		0x30
#define XDMAC_TSS_REQ		BIT(0)
#define XDMAC_IEN		0x34
#define XDMAC_IEN_ERRIEN	BIT(1)
#define XDMAC_IEN_ENDIEN	BIT(0)
#define XDMAC_STAT		0x40
#define XDMAC_STAT_TENF		BIT(0)
#define XDMAC_IR		0x44
#define XDMAC_IR_ERRF		BIT(1)
#define XDMAC_IR_ENDF		BIT(0)
#define XDMAC_ID		0x48
#define XDMAC_ID_ERRIDF		BIT(1)
#define XDMAC_ID_ENDIDF		BIT(0)

#define XDMAC_MAX_CHANS		16
#define XDMAC_INTERVAL_CLKS	20
#define XDMAC_MAX_WORDS		XDMAC_TNUM_MASK

/* mask off the low bits so the maximum transfer size stays 16-byte aligned */
#define XDMAC_MAX_WORD_SIZE	(XDMAC_ITS_MASK & ~GENMASK(3, 0))

#define UNIPHIER_XDMAC_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct uniphier_xdmac_desc_node {
	dma_addr_t src;
	dma_addr_t dst;
	u32 burst_size;
	u32 nr_burst;
};

struct uniphier_xdmac_desc {
	struct virt_dma_desc vd;

	unsigned int nr_node;
	unsigned int cur_node;
	enum dma_transfer_direction dir;
	struct uniphier_xdmac_desc_node nodes[];
};

struct uniphier_xdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_xdmac_device *xdev;
	struct uniphier_xdmac_desc *xd;
	void __iomem *reg_ch_base;
	struct dma_slave_config sconfig;
	int id;
	unsigned int req_factor;
};

struct uniphier_xdmac_device {
	struct dma_device ddev;
	void __iomem *reg_base;
	int nr_chans;
	struct uniphier_xdmac_chan channels[];
};

static struct uniphier_xdmac_chan *
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_xdmac_chan, vc);
}

static struct uniphier_xdmac_desc *
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_xdmac_desc, vd);
}

/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&xc->vc);
	if (!vd)
		return NULL;

	list_del(&vd->node);

	return to_uniphier_xdmac_desc(vd);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	u32 src_mode, src_width;
	u32 dst_mode, dst_width;
	dma_addr_t src_addr, dst_addr;
	u32 val, its, tnum;
	enum dma_slave_buswidth buswidth;

	src_addr = xd->nodes[xd->cur_node].src;
	dst_addr = xd->nodes[xd->cur_node].dst;
	its = xd->nodes[xd->cur_node].burst_size;
	tnum = xd->nodes[xd->cur_node].nr_burst;

	/*
	 * The bus width on the MEM side must be 4 or 8 bytes; this does
	 * not affect the width on the DEV side or the transfer size.
	 */
	if (xd->dir == DMA_DEV_TO_MEM) {
		src_mode = XDMAC_SADM_SAM_FIXED;
		buswidth = xc->sconfig.src_addr_width;
	} else {
		src_mode = XDMAC_SADM_SAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
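	/* the STW/DTW width fields take log2 of the byte count, hence __ffs() */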
	src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

	if (xd->dir == DMA_MEM_TO_DEV) {
		dst_mode = XDMAC_DADM_DAM_FIXED;
		buswidth = xc->sconfig.dst_addr_width;
	} else {
		dst_mode = XDMAC_DADM_DAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

	/* setup transfer factor */
	val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
	val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
	writel(val, xc->reg_ch_base + XDMAC_TFA);

	/* setup the channel */
	writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
	writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

	writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
	writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

	src_mode |= src_width;
	dst_mode |= dst_width;
	writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
	writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

	writel(its, xc->reg_ch_base + XDMAC_ITS);
	writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

	/* enable interrupt */
	writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
	       xc->reg_ch_base + XDMAC_IEN);

	/* start XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}

/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
	u32 val;

	/* disable interrupt */
	val = readl(xc->reg_ch_base + XDMAC_IEN);
	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
	writel(val, xc->reg_ch_base + XDMAC_IEN);

	/* stop XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val &= ~XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);

	/* wait until transfer is stopped; we may be in atomic context here */
	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
					 !(val & XDMAC_STAT_TENF), 100, 1000);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
	struct uniphier_xdmac_desc *xd;

	xd = uniphier_xdmac_next_desc(xc);
	if (xd)
		uniphier_xdmac_chan_start(xc, xd);

	/* set the descriptor on the channel even if xd is NULL */
	xc->xd = xd;
}

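/*
 * On a normal-end interrupt, advance to the next node of the current
 * descriptor; once all nodes are done, complete the cookie and start the
 * next queued descriptor, if any.
 */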
static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
	u32 stat;
	int ret;

	spin_lock(&xc->vc.lock);

	stat = readl(xc->reg_ch_base + XDMAC_ID);

	if (stat & XDMAC_ID_ERRIDF) {
		ret = uniphier_xdmac_chan_stop(xc);
		if (ret)
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error; failed to stop the channel\n");
		else
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error\n");

	} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
		xc->xd->cur_node++;
		if (xc->xd->cur_node >= xc->xd->nr_node) {
			vchan_cookie_complete(&xc->xd->vd);
			uniphier_xdmac_start(xc);
		} else {
			uniphier_xdmac_chan_start(xc, xc->xd);
		}
	}

	/* write the status bits back to clear them */
	writel(stat, xc->reg_ch_base + XDMAC_IR);

	spin_unlock(&xc->vc.lock);
}

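/*
 * The controller raises a single shared interrupt for all channels, so
 * each channel's status is checked on every interrupt.
 */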
static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
{
	struct uniphier_xdmac_device *xdev = dev_id;
	int i;

	for (i = 0; i < xdev->nr_chans; i++)
		uniphier_xdmac_chan_irq(&xdev->channels[i]);

	return IRQ_HANDLED;
}

static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_desc *xd;
	unsigned int nr;
	size_t burst_size, tlen;
	int i;

	if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
		return NULL;

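	/*
	 * Worst case, one node is needed per full XDMAC_MAX_WORD_SIZE chunk
	 * plus one node for the remainder smaller than a chunk.
	 */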
	nr = 1 + len / XDMAC_MAX_WORD_SIZE;

	xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
	if (!xd)
		return NULL;

	/* stop once len is consumed to avoid a zero-length tail node */
	for (i = 0; i < nr && len; i++) {
		burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
		xd->nodes[i].src = src;
		xd->nodes[i].dst = dst;
		xd->nodes[i].burst_size = burst_size;
		xd->nodes[i].nr_burst = len / burst_size;
		tlen = rounddown(len, burst_size);
		src += tlen;
		dst += tlen;
		len -= tlen;
	}

	xd->dir = DMA_MEM_TO_MEM;
	xd->nr_node = i;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}

static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	struct uniphier_xdmac_desc *xd;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 maxburst;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		maxburst = xc->sconfig.src_maxburst;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		maxburst = xc->sconfig.dst_maxburst;
	}

	if (!maxburst)
		maxburst = 1;
	if (maxburst > xc->xdev->ddev.max_burst) {
		dev_err(xc->xdev->ddev.dev,
			"Exceeds the maximum number of burst words\n");
		return NULL;
	}

	xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
	if (!xd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
			? xc->sconfig.src_addr : sg_dma_address(sg);
		xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
			? xc->sconfig.dst_addr : sg_dma_address(sg);
		xd->nodes[i].burst_size = maxburst * buswidth;
		xd->nodes[i].nr_burst =
			sg_dma_len(sg) / xd->nodes[i].burst_size;

		/*
		 * A transfer whose size is not a multiple of the unit size
		 * (the number of burst words * bus width) is currently not
		 * allowed, because the driver has no way to transfer the
		 * residue. In practice, to transfer a buffer of arbitrary
		 * size, 'src_maxburst' or 'dst_maxburst' of dma_slave_config
		 * must be set to 1.
		 */
		if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
			dev_err(xc->xdev->ddev.dev,
				"Unaligned transfer size: %u\n", sg_dma_len(sg));
			kfree(xd);
			return NULL;
		}

		if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
			dev_err(xc->xdev->ddev.dev,
				"Exceeds the maximum transfer size\n");
			kfree(xd);
			return NULL;
		}
	}

	xd->dir = direction;
	xd->nr_node = sg_len;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}

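/*
 * Record the slave configuration for later use. A consumer would typically
 * set it up before preparing a transfer, roughly as follows (the FIFO
 * address and widths are illustrative, not taken from a real board):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * Nothing is validated here; the parameters are checked against the
 * hardware limits when a descriptor is prepared.
 */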
static int uniphier_xdmac_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

	memcpy(&xc->sconfig, config, sizeof(*config));

	return 0;
}

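/*
 * Stop the in-flight descriptor, if any, and drop everything queued on
 * the virtual channel; the collected descriptors are freed outside the
 * lock.
 */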
static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (xc->xd) {
		vchan_terminate_vdesc(&xc->xd->vd);
		xc->xd = NULL;
		ret = uniphier_xdmac_chan_stop(xc);
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}

static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

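/*
 * Kick the channel only when it is idle; otherwise the completion
 * interrupt of the running descriptor chains to the next one.
 */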
static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !xc->xd)
		uniphier_xdmac_start(xc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_xdmac_desc(vd));
}

static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
				     int ch)
{
	struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

	xc->xdev = xdev;
	xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
	xc->vc.desc_free = uniphier_xdmac_desc_free;

	vchan_init(&xc->vc, &xdev->ddev);
}

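/*
 * Translate a two-cell DT dma-spec into a channel: the first cell selects
 * the channel, the second gives the hardware request factor (implies
 * #dma-cells = <2> on the controller node). A consumer node would contain
 * something like the following; the values are illustrative only:
 *
 *	dmas = <&xdmac 4 21>;
 */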
static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_chans)
		return NULL;

	xdev->channels[chan_id].id = chan_id;
	xdev->channels[chan_id].req_factor = dma_spec->args[1];

	return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
}

static int uniphier_xdmac_probe(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev;
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	int irq;
	int nr_chans;
	int i, ret;

	if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
		return -EINVAL;
	if (nr_chans > XDMAC_MAX_CHANS)
		nr_chans = XDMAC_MAX_CHANS;

	xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->nr_chans = nr_chans;
	xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg_base))
		return PTR_ERR(xdev->reg_base);

	ddev = &xdev->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			   BIT(DMA_MEM_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ddev->max_burst = XDMAC_MAX_WORDS;
	ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
	ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
	ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
	ddev->device_config = uniphier_xdmac_slave_config;
	ddev->device_terminate_all = uniphier_xdmac_terminate_all;
	ddev->device_synchronize = uniphier_xdmac_synchronize;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = uniphier_xdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++)
		uniphier_xdmac_chan_init(xdev, i);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
			       IRQF_SHARED, "xdmac", xdev);
	if (ret) {
		dev_err(dev, "Failed to request IRQ\n");
		return ret;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA device\n");
		return ret;
	}

	ret = of_dma_controller_register(dev->of_node,
					 of_dma_uniphier_xlate, xdev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA controller\n");
		goto out_unregister_dmac;
	}

	platform_set_drvdata(pdev, xdev);

	dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
		 nr_chans);

	return 0;

out_unregister_dmac:
	dma_async_device_unregister(ddev);

	return ret;
}

static int uniphier_xdmac_remove(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
	struct dma_device *ddev = &xdev->ddev;
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding one descriptor that was in flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid a memory leak.
	 */
	list_for_each_entry(chan, &ddev->channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(ddev);

	return 0;
}

static const struct of_device_id uniphier_xdmac_match[] = {
	{ .compatible = "socionext,uniphier-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);

static struct platform_driver uniphier_xdmac_driver = {
	.probe = uniphier_xdmac_probe,
	.remove = uniphier_xdmac_remove,
	.driver = {
		.name = "uniphier-xdmac",
		.of_match_table = uniphier_xdmac_match,
	},
};
module_platform_driver(uniphier_xdmac_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
MODULE_LICENSE("GPL v2");