/*
 * Core driver for the Intel integrated DMA 64-bit
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/dma/idma64.h>

#include "idma64.h"

/* For now we support only two channels */
#define IDMA64_NR_CHAN		2

/* ---------------------------------------------------------------------- */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

/* ---------------------------------------------------------------------- */

static void idma64_off(struct idma64 *idma64)
{
	unsigned short count = 100;

	dma_writel(idma64, CFG, 0);

	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

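	/* Wait for the controller to report itself disabled; give up after ~100 polls */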
	do {
		cpu_relax();
	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
}

static void idma64_on(struct idma64 *idma64)
{
	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
}

/* ---------------------------------------------------------------------- */

static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
	u32 cfglo = 0;

	/* Set default burst alignment */
	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;

	channel_writel(idma64c, CFG_LO, cfglo);
	channel_writel(idma64c, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);

	/*
	 * Make sure the controller is turned on.
	 *
	 * The iDMA is turned off in ->probe() and loses context during a
	 * system suspend / resume cycle. That's why we have to enable it
	 * each time we use it.
	 */
	idma64_on(idma64);
}

static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	channel_clear_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw = &desc->hw[0];

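	/*
	 * The per-block parameters (addresses, sizes) are taken from the
	 * LLI chain pointed to by LLP, so the static SAR / DAR values are
	 * simply cleared here.
	 */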
	channel_writeq(idma64c, SAR, 0);
	channel_writeq(idma64c, DAR, 0);

	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);

	channel_writeq(idma64c, LLP, hw->llp);

	channel_set_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_stop_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);

	idma64_chan_stop(idma64, idma64c);
}

static void idma64_start_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&idma64c->vchan);
	if (!vdesc) {
		idma64c->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	idma64c->desc = to_idma64_desc(vdesc);

	/* Configure the channel */
	idma64_chan_init(idma64, idma64c);

	/* Start the channel with a new descriptor */
	idma64_chan_start(idma64, idma64c);
}

/* ---------------------------------------------------------------------- */

static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
		u32 status_err, u32 status_xfer)
{
	struct idma64_chan *idma64c = &idma64->chan[c];
	struct idma64_desc *desc;

	spin_lock(&idma64c->vchan.lock);
	desc = idma64c->desc;
	if (desc) {
		if (status_err & (1 << c)) {
			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
			desc->status = DMA_ERROR;
		} else if (status_xfer & (1 << c)) {
			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
			desc->status = DMA_COMPLETE;
			vchan_cookie_complete(&desc->vdesc);
			idma64_start_transfer(idma64c);
		}

		/* idma64_start_transfer() updates idma64c->desc */
		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
			idma64_stop_transfer(idma64c);
	}
	spin_unlock(&idma64c->vchan.lock);
}

static irqreturn_t idma64_irq(int irq, void *dev)
{
	struct idma64 *idma64 = dev;
	u32 status = dma_readl(idma64, STATUS_INT);
	u32 status_xfer;
	u32 status_err;
	unsigned short i;

	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);

	/* Check if we have any interrupt from the DMA controller */
	if (!status)
		return IRQ_NONE;

	status_xfer = dma_readl(idma64, RAW(XFER));
	status_err = dma_readl(idma64, RAW(ERROR));

	for (i = 0; i < idma64->dma.chancnt; i++)
		idma64_chan_irq(idma64, i, status_err, status_xfer);

	return IRQ_HANDLED;
}

/* ---------------------------------------------------------------------- */

static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
{
	struct idma64_desc *desc;

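	/* Descriptors may be allocated in atomic context, hence GFP_NOWAIT */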
	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
	if (!desc->hw) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void idma64_desc_free(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct idma64_hw_desc *hw;

	if (desc->ndesc) {
		unsigned int i = desc->ndesc;

		do {
			hw = &desc->hw[--i];
			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
		} while (i);
	}

	kfree(desc->hw);
	kfree(desc);
}

static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
{
	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);

	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}

static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
		struct dma_slave_config *config,
		enum dma_transfer_direction direction, u64 llp)
{
	struct idma64_lli *lli = hw->lli;
	u64 sar, dar;
	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
	u32 src_width, dst_width;

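	/*
	 * On the memory side choose the widest bus width (capped at
	 * 4 bytes) that both the buffer address and the length are
	 * aligned to; the device side width comes from the slave config.
	 */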
	if (direction == DMA_MEM_TO_DEV) {
		sar = hw->phys;
		dar = config->dst_addr;
		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
			 IDMA64C_CTLL_FC_M2P;
		src_width = __ffs(sar | hw->len | 4);
		dst_width = __ffs(config->dst_addr_width);
	} else {	/* DMA_DEV_TO_MEM */
		sar = config->src_addr;
		dar = hw->phys;
		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
			 IDMA64C_CTLL_FC_P2M;
		src_width = __ffs(config->src_addr_width);
		dst_width = __ffs(dar | hw->len | 4);
	}

	lli->sar = sar;
	lli->dar = dar;

	lli->ctlhi = ctlhi;
	lli->ctllo = ctllo |
		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
		     IDMA64C_CTLL_SRC_WIDTH(src_width);

	lli->llp = llp;
}

static void idma64_desc_fill(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct dma_slave_config *config = &idma64c->config;
	unsigned int i = desc->ndesc;
	struct idma64_hw_desc *hw = &desc->hw[i - 1];
	struct idma64_lli *lli = hw->lli;
	u64 llp = 0;

	/* Fill the hardware descriptors and link them to a list */
	do {
		hw = &desc->hw[--i];
		idma64_hw_desc_fill(hw, config, desc->direction, llp);
		llp = hw->llp;
		desc->length += hw->len;
	} while (i);

	/* Trigger an interrupt after the last block is transferred */
	lli->ctllo |= IDMA64C_CTLL_INT_EN;

	/* Disable LLP transfer in the last block */
	lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
}

static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct idma64_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = idma64_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct idma64_hw_desc *hw = &desc->hw[i];

		/* Allocate DMA capable memory for hardware descriptor */
		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
		if (!hw->lli) {
			desc->ndesc = i;
			idma64_desc_free(idma64c, desc);
			return NULL;
		}

		hw->phys = sg_dma_address(sg);
		hw->len = sg_dma_len(sg);
	}

	desc->ndesc = sg_len;
	desc->direction = direction;
	desc->status = DMA_IN_PROGRESS;

	idma64_desc_fill(idma64c, desc);
	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
}

static void idma64_issue_pending(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
		idma64_start_transfer(idma64c);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}

static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw;
	size_t bytes = desc->length;
	u64 llp = channel_readq(idma64c, LLP);
	u32 ctlhi = channel_readl(idma64c, CTL_HI);
	unsigned int i = 0;

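	/*
	 * The LLP register points to the next LLI to be fetched: the
	 * matched block has not been started yet, and the block before it
	 * is the one currently in flight.
	 */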
	do {
		hw = &desc->hw[i];
		if (hw->llp == llp)
			break;
		bytes -= hw->len;
	} while (++i < desc->ndesc);

	if (!i)
		return bytes;

	/* The current chunk is not fully transferred yet */
	bytes += desc->hw[--i].len;

	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}

static enum dma_status idma64_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
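	/*
	 * For the active descriptor the residue is read back from the
	 * hardware; a descriptor still sitting in the queue has not been
	 * started, so its whole length remains.
	 */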
	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
		bytes = idma64_active_desc_size(idma64c);
		dma_set_residue(state, bytes);
		status = idma64c->desc->status;
	} else if (vdesc) {
		bytes = to_idma64_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return status;
}

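/* Convert a burst size given in items to its log2 register encoding */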
static void convert_burst(u32 *maxburst)
{
	if (*maxburst)
		*maxburst = __fls(*maxburst);
	else
		*maxburst = 0;
}

static int idma64_slave_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	memcpy(&idma64c->config, config, sizeof(idma64c->config));

	convert_burst(&idma64c->config.src_maxburst);
	convert_burst(&idma64c->config.dst_maxburst);

	return 0;
}

static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain)
{
	unsigned short count = 100;
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	if (drain)
		cfglo |= IDMA64C_CFGL_CH_DRAIN;
	else
		cfglo &= ~IDMA64C_CFGL_CH_DRAIN;

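	/* Suspend the channel and wait (up to ~100 us) until its FIFO is empty */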
	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
	do {
		udelay(1);
		cfglo = channel_readl(idma64c, CFG_LO);
	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
}

static void idma64_chan_activate(struct idma64_chan *idma64c)
{
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
}

static int idma64_pause(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
		idma64_chan_deactivate(idma64c, false);
		idma64c->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_resume(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
		idma64c->desc->status = DMA_IN_PROGRESS;
		idma64_chan_activate(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_terminate_all(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	idma64_chan_deactivate(idma64c, true);
	idma64_stop_transfer(idma64c);
	if (idma64c->desc) {
		idma64_vdesc_free(&idma64c->desc->vdesc);
		idma64c->desc = NULL;
	}
	vchan_get_all_descriptors(&idma64c->vchan, &head);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	vchan_dma_desc_free_list(&idma64c->vchan, &head);
	return 0;
}

static void idma64_synchronize(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_synchronize(&idma64c->vchan);
}

static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
					chan->device->dev,
					sizeof(struct idma64_lli), 8, 0);
	if (!idma64c->pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}

	return 0;
}

static void idma64_free_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_free_chan_resources(to_virt_chan(chan));
	dma_pool_destroy(idma64c->pool);
	idma64c->pool = NULL;
}

/* ---------------------------------------------------------------------- */

#define IDMA64_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static int idma64_probe(struct idma64_chip *chip)
{
	struct idma64 *idma64;
	unsigned short nr_chan = IDMA64_NR_CHAN;
	unsigned short i;
	int ret;

	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
	if (!idma64)
		return -ENOMEM;

	idma64->regs = chip->regs;
	chip->idma64 = idma64;

	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
				    GFP_KERNEL);
	if (!idma64->chan)
		return -ENOMEM;

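	/* One bit per channel, used with the shared mask / clear registers */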
	idma64->all_chan_mask = (1 << nr_chan) - 1;

	/* Turn off iDMA controller */
	idma64_off(idma64);

	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
			       dev_name(chip->dev), idma64);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&idma64->dma.channels);
	for (i = 0; i < nr_chan; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		idma64c->vchan.desc_free = idma64_vdesc_free;
		vchan_init(&idma64c->vchan, &idma64->dma);

		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
		idma64c->mask = BIT(i);
	}

	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);

	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;

	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;

	idma64->dma.device_issue_pending = idma64_issue_pending;
	idma64->dma.device_tx_status = idma64_tx_status;

	idma64->dma.device_config = idma64_slave_config;
	idma64->dma.device_pause = idma64_pause;
	idma64->dma.device_resume = idma64_resume;
	idma64->dma.device_terminate_all = idma64_terminate_all;
	idma64->dma.device_synchronize = idma64_synchronize;

	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	idma64->dma.dev = chip->sysdev;

	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);

	ret = dma_async_device_register(&idma64->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
	return 0;
}

static int idma64_remove(struct idma64_chip *chip)
{
	struct idma64 *idma64 = chip->idma64;
	unsigned short i;

	dma_async_device_unregister(&idma64->dma);

	/*
	 * Explicitly call devm_free_irq() to avoid the side effects with
	 * the scheduled tasklets.
	 */
	devm_free_irq(chip->dev, chip->irq, idma64);

	for (i = 0; i < idma64->dma.chancnt; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		tasklet_kill(&idma64c->vchan.task);
	}

	return 0;
}

/* ---------------------------------------------------------------------- */

static int idma64_platform_probe(struct platform_device *pdev)
{
	struct idma64_chip *chip;
	struct device *dev = &pdev->dev;
	struct device *sysdev = dev->parent;
	struct resource *mem;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

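	/*
	 * The actual DMA transfers are done by the parent device, so the
	 * DMA mask is set on it.
	 */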
	ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	chip->dev = dev;
	chip->sysdev = sysdev;

	ret = idma64_probe(chip);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, chip);
	return 0;
}

static int idma64_platform_remove(struct platform_device *pdev)
{
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	return idma64_remove(chip);
}

#ifdef CONFIG_PM_SLEEP

static int idma64_pm_suspend(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_off(chip->idma64);
	return 0;
}

static int idma64_pm_resume(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_on(chip->idma64);
	return 0;
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops idma64_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};

static struct platform_driver idma64_platform_driver = {
	.probe		= idma64_platform_probe,
	.remove		= idma64_platform_remove,
	.driver = {
		.name	= LPSS_IDMA64_DRIVER_NAME,
		.pm	= &idma64_dev_pm_ops,
	},
};

module_platform_driver(idma64_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME);