/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC         0x00
#define JZ_DMA_REG_DIRQP        0x04
#define JZ_DMA_REG_DDR          0x08
#define JZ_DMA_REG_DDRS         0x0c
#define JZ_DMA_REG_DCKE         0x10
#define JZ_DMA_REG_DCKES        0x14
#define JZ_DMA_REG_DCKEC        0x18
#define JZ_DMA_REG_DMACP        0x1c
#define JZ_DMA_REG_DSIRQP       0x20
#define JZ_DMA_REG_DSIRQM       0x24
#define JZ_DMA_REG_DCIRQP       0x28
#define JZ_DMA_REG_DCIRQM       0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)      ((n) * 0x20)
#define JZ_DMA_REG_DSA          0x00
#define JZ_DMA_REG_DTA          0x04
#define JZ_DMA_REG_DTC          0x08
#define JZ_DMA_REG_DRT          0x0c
#define JZ_DMA_REG_DCS          0x10
#define JZ_DMA_REG_DCM          0x14
#define JZ_DMA_REG_DDA          0x18
#define JZ_DMA_REG_DSD          0x1c

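/*
 * For illustration: a per-channel register is addressed as the channel
 * base plus the register offset, e.g. the DCS register of channel 2
 * lives at chn_base + JZ_DMA_REG_CHAN(2) + JZ_DMA_REG_DCS
 * = chn_base + 0x40 + 0x10 = chn_base + 0x50.
 */
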
#define JZ_DMA_DMAC_DMAE        BIT(0)
#define JZ_DMA_DMAC_AR          BIT(2)
#define JZ_DMA_DMAC_HLT         BIT(3)
#define JZ_DMA_DMAC_FMSC        BIT(31)

#define JZ_DMA_DRT_AUTO         0x8

#define JZ_DMA_DCS_CTE          BIT(0)
#define JZ_DMA_DCS_HLT          BIT(2)
#define JZ_DMA_DCS_TT           BIT(3)
#define JZ_DMA_DCS_AR           BIT(4)
#define JZ_DMA_DCS_DES8         BIT(30)

#define JZ_DMA_DCM_LINK         BIT(0)
#define JZ_DMA_DCM_TIE          BIT(1)
#define JZ_DMA_DCM_STDE         BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT    8
#define JZ_DMA_DCM_TSZ_MASK     (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT     12
#define JZ_DMA_DCM_SP_SHIFT     14
#define JZ_DMA_DCM_DAI          BIT(22)
#define JZ_DMA_DCM_SAI          BIT(23)

#define JZ_DMA_SIZE_4_BYTE      0x0
#define JZ_DMA_SIZE_1_BYTE      0x1
#define JZ_DMA_SIZE_2_BYTE      0x2
#define JZ_DMA_SIZE_16_BYTE     0x3
#define JZ_DMA_SIZE_32_BYTE     0x4
#define JZ_DMA_SIZE_64_BYTE     0x5
#define JZ_DMA_SIZE_128_BYTE    0x6

#define JZ_DMA_WIDTH_32_BIT     0x0
#define JZ_DMA_WIDTH_8_BIT      0x1
#define JZ_DMA_WIDTH_16_BIT     0x2

#define JZ_DMA_BUSWIDTHS        (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)  | \
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define JZ4780_DMA_CTRL_OFFSET  0x1000

/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT     BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA    BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM         BIT(2)

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
        uint32_t dcm;
        uint32_t dsa;
        uint32_t dta;
        uint32_t dtc;
};

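/*
 * For illustration, a sketch of how dtc gets packed further down in this
 * driver: with 32-byte transfers (transfer_shift = 5), a 4 KiB block
 * whose successor sits 0x20 bytes from the descriptor base would use
 *
 *      dtc = (0x1000 >> 5) | ((0x20 >> 4) << 24);
 *
 * i.e. a block count of 0x80 in the low 24 bits and the next-descriptor
 * offset, in units of 16 bytes, in the upper 8 bits.
 */
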
/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE  PAGE_SIZE
#define JZ_DMA_MAX_DESC \
        (JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))

struct jz4780_dma_desc {
        struct virt_dma_desc vdesc;

        struct jz4780_dma_hwdesc *desc;
        dma_addr_t desc_phys;
        unsigned int count;
        enum dma_transaction_type type;
        uint32_t status;
};

struct jz4780_dma_chan {
        struct virt_dma_chan vchan;
        unsigned int id;
        struct dma_pool *desc_pool;

        uint32_t transfer_type;
        uint32_t transfer_shift;
        struct dma_slave_config config;

        struct jz4780_dma_desc *desc;
        unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
        unsigned int nb_channels;
        unsigned int transfer_ord_max;
        unsigned long flags;
};

struct jz4780_dma_dev {
        struct dma_device dma_device;
        void __iomem *chn_base;
        void __iomem *ctrl_base;
        struct clk *clk;
        unsigned int irq;
        const struct jz4780_dma_soc_data *soc_data;

        uint32_t chan_reserved;
        struct jz4780_dma_chan chan[];
};

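/*
 * chan[] is a flexible array member: probe below allocates the
 * jz4780_dma_dev and its soc_data->nb_channels channel structures as a
 * single block.
 */
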
struct jz4780_dma_filter_data {
        struct device_node *of_node;
        uint32_t transfer_type;
        int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
        struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
        struct jz4780_dma_chan *jzchan)
{
        return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
                            dma_device);
}

static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
        unsigned int chn, unsigned int reg)
{
        return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
        unsigned int chn, unsigned int reg, uint32_t val)
{
        writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
        unsigned int reg)
{
        return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
        unsigned int reg, uint32_t val)
{
        writel(val, jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
        unsigned int chn)
{
        if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM)
                jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKES, BIT(chn));
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
        unsigned int chn)
{
        if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM)
                jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}

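/*
 * DCKES and DCKEC appear to be set/clear companions to DCKE: writing
 * BIT(chn) to DCKES ungates that channel's clock and writing it to DCKEC
 * gates it again, so no read-modify-write cycle is needed here.
 */
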
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
        struct jz4780_dma_chan *jzchan, unsigned int count,
        enum dma_transaction_type type)
{
        struct jz4780_dma_desc *desc;

        if (count > JZ_DMA_MAX_DESC)
                return NULL;

        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
                                    &desc->desc_phys);
        if (!desc->desc) {
                kfree(desc);
                return NULL;
        }

        desc->count = count;
        desc->type = type;
        return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
        struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

        dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
        kfree(desc);
}

static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
        unsigned long val, uint32_t *shift)
{
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        int ord = ffs(val) - 1;

        /*
         * 8-byte transfer sizes are unsupported, so fall back to 4. If the
         * size is larger than the maximum, just limit it. It is perfectly
         * safe to fall back in this way since we won't exceed the maximum
         * burst size supported by the device; the only effect is reduced
         * efficiency. This is better than refusing to perform the request
         * at all.
         */
        if (ord == 3)
                ord = 2;
        else if (ord > jzdma->soc_data->transfer_ord_max)
                ord = jzdma->soc_data->transfer_ord_max;

        *shift = ord;

        switch (ord) {
        case 0:
                return JZ_DMA_SIZE_1_BYTE;
        case 1:
                return JZ_DMA_SIZE_2_BYTE;
        case 2:
                return JZ_DMA_SIZE_4_BYTE;
        case 4:
                return JZ_DMA_SIZE_16_BYTE;
        case 5:
                return JZ_DMA_SIZE_32_BYTE;
        case 6:
                return JZ_DMA_SIZE_64_BYTE;
        default:
                return JZ_DMA_SIZE_128_BYTE;
        }
}

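/*
 * A worked example with made-up values: for addr = 0x1010, len = 0x30 and
 * width * maxburst = 0x10, val = 0x1010 | 0x30 | 0x10 = 0x1030, whose
 * lowest set bit gives ord = ffs(0x1030) - 1 = 4, i.e. 16-byte transfers:
 * the largest power of two that divides the address, the length and the
 * burst size alike.
 */
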
static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
        struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
        enum dma_transfer_direction direction)
{
        struct dma_slave_config *config = &jzchan->config;
        uint32_t width, maxburst, tsz;

        if (direction == DMA_MEM_TO_DEV) {
                desc->dcm = JZ_DMA_DCM_SAI;
                desc->dsa = addr;
                desc->dta = config->dst_addr;

                width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
        } else {
                desc->dcm = JZ_DMA_DCM_DAI;
                desc->dsa = config->src_addr;
                desc->dta = addr;

                width = config->src_addr_width;
                maxburst = config->src_maxburst;
        }

        /*
         * This calculates the maximum transfer size that can be used with the
         * given address, length, width and maximum burst size. The address
         * must be aligned to the transfer size, the total length must be
         * divisible by the transfer size, and we must not use more than the
         * maximum burst specified by the user.
         */
        tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
                                       &jzchan->transfer_shift);

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                width = JZ_DMA_WIDTH_32_BIT;
                break;
        default:
                return -EINVAL;
        }

        desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
        desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
        desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

        desc->dtc = len >> jzchan->transfer_shift;
        return 0;
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
        void *context)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_desc *desc;
        unsigned int i;
        int err;

        desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
        if (!desc)
                return NULL;

        for (i = 0; i < sg_len; i++) {
                err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
                                              sg_dma_address(&sgl[i]),
                                              sg_dma_len(&sgl[i]),
                                              direction);
                if (err < 0) {
                        jz4780_dma_desc_free(&desc->vdesc);
                        return NULL;
                }

                desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

                if (i != (sg_len - 1)) {
                        /* Automatically proceed to the next descriptor. */
                        desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

                        /*
                         * The upper 8 bits of the DTC field in the descriptor
                         * must be set to (offset from descriptor base of next
                         * descriptor >> 4).
                         */
                        desc->desc[i].dtc |=
                                (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
                }
        }

        return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_desc *desc;
        unsigned int periods, i;
        int err;

        if (buf_len % period_len)
                return NULL;

        periods = buf_len / period_len;

        desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
        if (!desc)
                return NULL;

        for (i = 0; i < periods; i++) {
                err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
                                              period_len, direction);
                if (err < 0) {
                        jz4780_dma_desc_free(&desc->vdesc);
                        return NULL;
                }

                buf_addr += period_len;

                /*
                 * Set the link bit to indicate that the controller should
                 * automatically proceed to the next descriptor. In
                 * jz4780_dma_begin(), this will be cleared if we need to issue
                 * an interrupt after each period.
                 */
                desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

                /*
                 * The upper 8 bits of the DTC field in the descriptor must be
                 * set to (offset from descriptor base of next descriptor >> 4).
                 * If this is the last descriptor, link it back to the first,
                 * i.e. leave offset set to 0, otherwise point to the next one.
                 */
                if (i != (periods - 1)) {
                        desc->desc[i].dtc |=
                                (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
                }
        }

        return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_desc *desc;
        uint32_t tsz;

        desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
        if (!desc)
                return NULL;

        tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
                                       &jzchan->transfer_shift);

        jzchan->transfer_type = JZ_DMA_DRT_AUTO;

        desc->desc[0].dsa = src;
        desc->desc[0].dta = dest;
        desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
                            tsz << JZ_DMA_DCM_TSZ_SHIFT |
                            JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
                            JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
        desc->desc[0].dtc = len >> jzchan->transfer_shift;

        return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        struct virt_dma_desc *vdesc;
        unsigned int i;
        dma_addr_t desc_phys;

        if (!jzchan->desc) {
                vdesc = vchan_next_desc(&jzchan->vchan);
                if (!vdesc)
                        return;

                list_del(&vdesc->node);

                jzchan->desc = to_jz4780_dma_desc(vdesc);
                jzchan->curr_hwdesc = 0;

                if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
                        /*
                         * The DMA controller doesn't support triggering an
                         * interrupt after processing each descriptor, only
                         * after processing an entire terminated list of
                         * descriptors. For a cyclic DMA setup the list of
                         * descriptors is not terminated so we can never get an
                         * interrupt.
                         *
                         * If the user requested a callback for a cyclic DMA
                         * setup then we work around this hardware limitation
                         * here by degrading to a set of unlinked descriptors
                         * which we will submit in sequence in response to the
                         * completion of processing the previous descriptor.
                         */
                        for (i = 0; i < jzchan->desc->count; i++)
                                jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
                }
        } else {
                /*
                 * There is an existing transfer, therefore this must be one
                 * for which we unlinked the descriptors above. Advance to the
                 * next one in the list.
                 */
                jzchan->curr_hwdesc =
                        (jzchan->curr_hwdesc + 1) % jzchan->desc->count;
        }

        /* Enable the channel's clock. */
        jz4780_dma_chan_enable(jzdma, jzchan->id);

        /* Use 4-word descriptors. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

        /* Set transfer type. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
                              jzchan->transfer_type);

        /* Write descriptor address and initiate descriptor fetch. */
        desc_phys = jzchan->desc->desc_phys +
                    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

        /* Enable the channel. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
                              JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&jzchan->vchan.lock, flags);

        if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
                jz4780_dma_begin(jzchan);

        spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&jzchan->vchan.lock, flags);

        /* Clear the DMA status and stop the transfer. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
        if (jzchan->desc) {
                vchan_terminate_vdesc(&jzchan->desc->vdesc);
                jzchan->desc = NULL;
        }

        jz4780_dma_chan_disable(jzdma, jzchan->id);

        vchan_get_all_descriptors(&jzchan->vchan, &head);

        spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

        vchan_dma_desc_free_list(&jzchan->vchan, &head);
        return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

        vchan_synchronize(&jzchan->vchan);
        jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
        struct dma_slave_config *config)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

        if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) ||
            (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
                return -EINVAL;

        /* Copy the rest of the slave configuration; it is used later. */
        memcpy(&jzchan->config, config, sizeof(jzchan->config));

        return 0;
}

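/*
 * A minimal sketch of the client side of the above (hypothetical device
 * and values, for illustration only): a peripheral driver fills in a
 * dma_slave_config and the dmaengine core routes it here through the
 * device_config callback:
 *
 *      struct dma_slave_config cfg = {
 *              .dst_addr       = fifo_phys,    <- device FIFO bus address
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst   = 16,
 *      };
 *      dmaengine_slave_config(chan, &cfg);
 */
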
static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
        struct jz4780_dma_desc *desc, unsigned int next_sg)
{
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        unsigned int residue, count;
        unsigned int i;

        residue = 0;

        for (i = next_sg; i < desc->count; i++)
                residue += desc->desc[i].dtc << jzchan->transfer_shift;

        if (next_sg != 0) {
                count = jz4780_dma_chn_readl(jzdma, jzchan->id,
                                             JZ_DMA_REG_DTC);
                residue += count << jzchan->transfer_shift;
        }

        return residue;
}

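/*
 * For illustration: with 4-byte transfers (transfer_shift = 2), two
 * not-yet-started descriptors of dtc = 0x100 blocks each contribute
 * 2 * (0x100 << 2) = 0x800 bytes of residue, plus whatever block count
 * the DTC register still reports for the descriptor in flight.
 */
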
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        if ((status == DMA_COMPLETE) || (txstate == NULL))
                return status;

        spin_lock_irqsave(&jzchan->vchan.lock, flags);

        vdesc = vchan_find_desc(&jzchan->vchan, cookie);
        if (vdesc) {
                /* On the issued list, so hasn't been processed yet */
                txstate->residue = jz4780_dma_desc_residue(jzchan,
                                        to_jz4780_dma_desc(vdesc), 0);
        } else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
                txstate->residue = jz4780_dma_desc_residue(jzchan,
                                        jzchan->desc,
                                        (jzchan->curr_hwdesc + 1) %
                                        jzchan->desc->count);
        } else {
                txstate->residue = 0;
        }

        if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
            && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
                status = DMA_ERROR;

        spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
        return status;
}

static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
        struct jz4780_dma_chan *jzchan)
{
        uint32_t dcs;

        spin_lock(&jzchan->vchan.lock);

        dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

        if (dcs & JZ_DMA_DCS_AR) {
                dev_warn(&jzchan->vchan.chan.dev->device,
                         "address error (DCS=0x%x)\n", dcs);
        }

        if (dcs & JZ_DMA_DCS_HLT) {
                dev_warn(&jzchan->vchan.chan.dev->device,
                         "channel halt (DCS=0x%x)\n", dcs);
        }

        if (jzchan->desc) {
                jzchan->desc->status = dcs;

                if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
                        if (jzchan->desc->type == DMA_CYCLIC) {
                                vchan_cyclic_callback(&jzchan->desc->vdesc);
                        } else {
                                vchan_cookie_complete(&jzchan->desc->vdesc);
                                jzchan->desc = NULL;
                        }

                        jz4780_dma_begin(jzchan);
                }
        } else {
                dev_err(&jzchan->vchan.chan.dev->device,
                        "channel IRQ with no active transfer\n");
        }

        spin_unlock(&jzchan->vchan.lock);
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
        struct jz4780_dma_dev *jzdma = data;
        uint32_t pending, dmac;
        int i;

        pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

        for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
                if (!(pending & BIT(i)))
                        continue;

                jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
        }

        /* Clear halt and address error status of all channels. */
        dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
        dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

        /* Clear interrupt pending status. */
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);

        return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

        jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
                                            chan->device->dev,
                                            JZ_DMA_DESC_BLOCK_SIZE,
                                            PAGE_SIZE, 0);
        if (!jzchan->desc_pool) {
                dev_err(&chan->dev->device,
                        "failed to allocate descriptor pool\n");
                return -ENOMEM;
        }

        return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

        vchan_free_chan_resources(&jzchan->vchan);
        dma_pool_destroy(jzchan->desc_pool);
        jzchan->desc_pool = NULL;
}

static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        struct jz4780_dma_filter_data *data = param;

        if (jzdma->dma_device.dev->of_node != data->of_node)
                return false;

        if (data->channel > -1) {
                if (data->channel != jzchan->id)
                        return false;
        } else if (jzdma->chan_reserved & BIT(jzchan->id)) {
                return false;
        }

        jzchan->transfer_type = data->transfer_type;

        return true;
}

static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
        struct of_dma *ofdma)
{
        struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
        dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
        struct jz4780_dma_filter_data data;

        if (dma_spec->args_count != 2)
                return NULL;

        data.of_node = ofdma->of_node;
        data.transfer_type = dma_spec->args[0];
        data.channel = dma_spec->args[1];

        if (data.channel > -1) {
                if (data.channel >= jzdma->soc_data->nb_channels) {
                        dev_err(jzdma->dma_device.dev,
                                "device requested non-existent channel %u\n",
                                data.channel);
                        return NULL;
                }

                /* Can only select a channel marked as reserved. */
                if (!(jzdma->chan_reserved & BIT(data.channel))) {
                        dev_err(jzdma->dma_device.dev,
                                "device requested unreserved channel %u\n",
                                data.channel);
                        return NULL;
                }

                jzdma->chan[data.channel].transfer_type = data.transfer_type;

                return dma_get_slave_channel(
                        &jzdma->chan[data.channel].vchan.chan);
        } else {
                return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
        }
}

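/*
 * A consumer binds to this controller with two cells: the transfer type,
 * then either an explicit (reserved) channel number or 0xffffffff, which
 * the int cast above turns into -1, meaning any unreserved channel. A
 * devicetree sketch with made-up values:
 *
 *      dmas = <&dma 0x14 0xffffffff>;
 *      dma-names = "rx";
 */
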
static int jz4780_dma_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        const struct jz4780_dma_soc_data *soc_data;
        struct jz4780_dma_dev *jzdma;
        struct jz4780_dma_chan *jzchan;
        struct dma_device *dd;
        struct resource *res;
        int i, ret;

        if (!dev->of_node) {
                dev_err(dev, "This driver must be probed from devicetree\n");
                return -EINVAL;
        }

        soc_data = device_get_match_data(dev);
        if (!soc_data)
                return -EINVAL;

        jzdma = devm_kzalloc(dev, sizeof(*jzdma)
                             + sizeof(*jzdma->chan) * soc_data->nb_channels,
                             GFP_KERNEL);
        if (!jzdma)
                return -ENOMEM;

        jzdma->soc_data = soc_data;
        platform_set_drvdata(pdev, jzdma);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "failed to get I/O memory\n");
                return -EINVAL;
        }

        jzdma->chn_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(jzdma->chn_base))
                return PTR_ERR(jzdma->chn_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                jzdma->ctrl_base = devm_ioremap_resource(dev, res);
                if (IS_ERR(jzdma->ctrl_base))
                        return PTR_ERR(jzdma->ctrl_base);
        } else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
                /*
                 * On JZ4780, if the second memory resource was not supplied,
                 * assume we're using an old devicetree, and calculate the
                 * offset to the control registers.
                 */
                jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
        } else {
                dev_err(dev, "failed to get I/O memory\n");
                return -EINVAL;
        }

        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(dev, "failed to get IRQ: %d\n", ret);
                return ret;
        }

        jzdma->irq = ret;

        ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
                          jzdma);
        if (ret) {
                dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
                return ret;
        }

        jzdma->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(jzdma->clk)) {
                dev_err(dev, "failed to get clock\n");
                ret = PTR_ERR(jzdma->clk);
                goto err_free_irq;
        }

        clk_prepare_enable(jzdma->clk);

        /* Property is optional; if it doesn't exist the value will remain 0. */
        of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
                                   0, &jzdma->chan_reserved);

        dd = &jzdma->dma_device;

        dma_cap_set(DMA_MEMCPY, dd->cap_mask);
        dma_cap_set(DMA_SLAVE, dd->cap_mask);
        dma_cap_set(DMA_CYCLIC, dd->cap_mask);

        dd->dev = dev;
        dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
        dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
        dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
        dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
        dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
        dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
        dd->device_config = jz4780_dma_config;
        dd->device_terminate_all = jz4780_dma_terminate_all;
        dd->device_synchronize = jz4780_dma_synchronize;
        dd->device_tx_status = jz4780_dma_tx_status;
        dd->device_issue_pending = jz4780_dma_issue_pending;
        dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
        dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
        dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        /*
         * Enable DMA controller, mark all channels as not programmable.
         * Also set the FMSC bit - it increases MSC performance, so it makes
         * little sense not to enable it.
         */
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC,
                               JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);

        if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
                jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

        INIT_LIST_HEAD(&dd->channels);

        for (i = 0; i < soc_data->nb_channels; i++) {
                jzchan = &jzdma->chan[i];
                jzchan->id = i;

                vchan_init(&jzchan->vchan, dd);
                jzchan->vchan.desc_free = jz4780_dma_desc_free;
        }

        ret = dma_async_device_register(dd);
        if (ret) {
                dev_err(dev, "failed to register device\n");
                goto err_disable_clk;
        }

        /* Register with OF DMA helpers. */
        ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
                                         jzdma);
        if (ret) {
                dev_err(dev, "failed to register OF DMA controller\n");
                goto err_unregister_dev;
        }

        dev_info(dev, "JZ4780 DMA controller initialised\n");
        return 0;

err_unregister_dev:
        dma_async_device_unregister(dd);

err_disable_clk:
        clk_disable_unprepare(jzdma->clk);

err_free_irq:
        free_irq(jzdma->irq, jzdma);
        return ret;
}

static int jz4780_dma_remove(struct platform_device *pdev)
{
        struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
        int i;

        of_dma_controller_free(pdev->dev.of_node);

        free_irq(jzdma->irq, jzdma);

        for (i = 0; i < jzdma->soc_data->nb_channels; i++)
                tasklet_kill(&jzdma->chan[i].vchan.task);

        dma_async_device_unregister(&jzdma->dma_device);
        return 0;
}

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
        .nb_channels = 6,
        .transfer_ord_max = 6,
        .flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
        .nb_channels = 32,
        .transfer_ord_max = 7,
        .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
        { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
        { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
        {},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
        .probe          = jz4780_dma_probe,
        .remove         = jz4780_dma_remove,
        .driver = {
                .name   = "jz4780-dma",
                .of_match_table = of_match_ptr(jz4780_dma_dt_match),
        },
};

static int __init jz4780_dma_init(void)
{
        return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
        platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");