blob: 308bed0a560acdf6016f3438f3c9e4e26c5ddebb [file] [log] [blame]
Thomas Gleixnerea2305f2019-05-20 19:08:05 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Zhang Wei173acc72008-03-01 07:42:48 -07002/*
Forrest Shif3c677b2010-12-09 16:14:04 +08003 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
Zhang Wei173acc72008-03-01 07:42:48 -07004 *
5 * Author:
6 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
7 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
Zhang Wei173acc72008-03-01 07:42:48 -07008 */
9#ifndef __DMA_FSLDMA_H
10#define __DMA_FSLDMA_H
11
12#include <linux/device.h>
13#include <linux/dmapool.h>
14#include <linux/dmaengine.h>
15
16/* Define data structures needed by Freescale
17 * MPC8540 and MPC8349 DMA controller.
18 */
19#define FSL_DMA_MR_CS 0x00000001
20#define FSL_DMA_MR_CC 0x00000002
21#define FSL_DMA_MR_CA 0x00000008
22#define FSL_DMA_MR_EIE 0x00000040
23#define FSL_DMA_MR_XFE 0x00000020
24#define FSL_DMA_MR_EOLNIE 0x00000100
25#define FSL_DMA_MR_EOLSIE 0x00000080
26#define FSL_DMA_MR_EOSIE 0x00000200
27#define FSL_DMA_MR_CDSM 0x00000010
28#define FSL_DMA_MR_CTM 0x00000004
29#define FSL_DMA_MR_EMP_EN 0x00200000
30#define FSL_DMA_MR_EMS_EN 0x00040000
31#define FSL_DMA_MR_DAHE 0x00002000
32#define FSL_DMA_MR_SAHE 0x00001000
33
Thomas Breitungccc07722017-06-19 16:40:04 +020034#define FSL_DMA_MR_SAHTS_MASK 0x0000C000
35#define FSL_DMA_MR_DAHTS_MASK 0x00030000
36#define FSL_DMA_MR_BWC_MASK 0x0f000000
37
Forrest Shif3c677b2010-12-09 16:14:04 +080038/*
39 * Bandwidth/pause control determines how many bytes a given
40 * channel is allowed to transfer before the DMA engine pauses
41 * the current channel and switches to the next channel
42 */
Hongbo Zhang0ca583a2014-01-16 14:10:53 +080043#define FSL_DMA_MR_BWC 0x0A000000
Forrest Shif3c677b2010-12-09 16:14:04 +080044
Zhang Wei173acc72008-03-01 07:42:48 -070045/* Special MR definition for MPC8349 */
46#define FSL_DMA_MR_EOTIE 0x00000080
Ira W. Snydera7aea372009-04-23 16:17:54 -070047#define FSL_DMA_MR_PRC_RM 0x00000800
Zhang Wei173acc72008-03-01 07:42:48 -070048
49#define FSL_DMA_SR_CH 0x00000020
Zhang Weif79abb62008-03-18 18:45:00 -070050#define FSL_DMA_SR_PE 0x00000010
Zhang Wei173acc72008-03-01 07:42:48 -070051#define FSL_DMA_SR_CB 0x00000004
52#define FSL_DMA_SR_TE 0x00000080
53#define FSL_DMA_SR_EOSI 0x00000002
54#define FSL_DMA_SR_EOLSI 0x00000001
55#define FSL_DMA_SR_EOCDI 0x00000001
56#define FSL_DMA_SR_EOLNI 0x00000008
57
58#define FSL_DMA_SATR_SBPATMU 0x20000000
59#define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000
60#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000
61#define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000
62#define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000
63#define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000
64
65#define FSL_DMA_DATR_DBPATMU 0x20000000
66#define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000
67#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000
68#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000
69
70#define FSL_DMA_EOL ((u64)0x1)
71#define FSL_DMA_SNEN ((u64)0x10)
72#define FSL_DMA_EOSIE 0x8
73#define FSL_DMA_NLDA_MASK (~(u64)0x1f)
74
75#define FSL_DMA_BCR_MAX_CNT 0x03ffffffu
76
77#define FSL_DMA_DGSR_TE 0x80
78#define FSL_DMA_DGSR_CH 0x20
79#define FSL_DMA_DGSR_PE 0x10
80#define FSL_DMA_DGSR_EOLNI 0x08
81#define FSL_DMA_DGSR_CB 0x04
82#define FSL_DMA_DGSR_EOSI 0x02
83#define FSL_DMA_DGSR_EOLSI 0x01
84
Kevin Hao75dc1772015-01-08 18:38:16 +080085#define FSL_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
86 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
87 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
88 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
Al Viroa4e6d5d2008-03-29 03:10:18 +000089typedef u64 __bitwise v64;
90typedef u32 __bitwise v32;
91
/*
 * Hardware link descriptor, read directly by the DMA engine.
 * Fields are stored in the controller's endianness (v32/v64 are __bitwise
 * cookies; convert with CPU_TO_DMA()/DMA_TO_CPU()).  The hardware requires
 * 32-byte alignment.
 */
struct fsl_dma_ld_hw {
	v64 src_addr;		/* transfer source address */
	v64 dst_addr;		/* transfer destination address */
	v64 next_ln_addr;	/* address of the next link descriptor */
	v32 count;		/* transfer byte count */
	v32 reserve;		/* reserved, pads the descriptor */
} __attribute__((aligned(32)));
99
/*
 * Software descriptor: wraps one hardware link descriptor together with
 * the bookkeeping state needed by the dmaengine framework.
 */
struct fsl_desc_sw {
	struct fsl_dma_ld_hw hw;	/* DMA-visible hardware descriptor */
	struct list_head node;		/* entry on a channel descriptor list */
	struct list_head tx_list;	/* descriptors forming this transaction */
	struct dma_async_tx_descriptor async_tx; /* dmaengine transaction handle */
} __attribute__((aligned(32)));
106
/*
 * Per-channel memory-mapped register block.  The comments give each
 * register's byte offset from the channel base; note that ndar is a
 * 64-bit register at an unaligned (0x24) offset.
 */
struct fsldma_chan_regs {
	u32 mr;		/* 0x00 - Mode Register */
	u32 sr;		/* 0x04 - Status Register */
	u64 cdar;	/* 0x08 - Current descriptor address register */
	u64 sar;	/* 0x10 - Source Address Register */
	u64 dar;	/* 0x18 - Destination Address Register */
	u32 bcr;	/* 0x20 - Byte Count Register */
	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
};
116
Ira Snydera4f56d42010-01-06 13:34:01 +0000117struct fsldma_chan;
Hongbo Zhang8de7a7d2013-09-26 17:33:43 +0800118#define FSL_DMA_MAX_CHANS_PER_DEVICE 8
Zhang Wei173acc72008-03-01 07:42:48 -0700119
/* Controller-wide driver state: the DMA device plus all of its channels */
struct fsldma_device {
	void __iomem *regs;	/* DGSR register base */
	struct device *dev;	/* backing platform device */
	struct dma_device common;	/* dmaengine device registration */
	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
	u32 feature;		/* The same as DMA channels */
	int irq;		/* Channel IRQ */
};
128
Ira Snydera4f56d42010-01-06 13:34:01 +0000129/* Define macros for fsldma_chan->feature property */
Zhang Wei173acc72008-03-01 07:42:48 -0700130#define FSL_DMA_LITTLE_ENDIAN 0x00000000
131#define FSL_DMA_BIG_ENDIAN 0x00000001
132
133#define FSL_DMA_IP_MASK 0x00000ff0
134#define FSL_DMA_IP_85XX 0x00000010
135#define FSL_DMA_IP_83XX 0x00000020
136
137#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
138#define FSL_DMA_CHAN_START_EXT 0x00002000
139
#ifdef CONFIG_PM
/* Channel registers preserved across suspend/resume (mode register only) */
struct fsldma_chan_regs_save {
	u32 mr;
};

/* Channel power-management state */
enum fsldma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
#endif
150
/* Per-channel driver state */
struct fsldma_chan {
	char name[8];			/* Channel name */
	struct fsldma_chan_regs __iomem *regs;	/* mapped channel registers */
	spinlock_t desc_lock;		/* Descriptor operation lock */
	/*
	 * Descriptors which are queued to run, but have not yet been
	 * submitted to the hardware for execution
	 */
	struct list_head ld_pending;
	/*
	 * Descriptors which are currently being executed by the hardware
	 */
	struct list_head ld_running;
	/*
	 * Descriptors which have finished execution by the hardware. These
	 * descriptors have already had their cleanup actions run. They are
	 * waiting for the ACK bit to be set by the async_tx API.
	 */
	struct list_head ld_completed;	/* Link descriptors queue */
	struct dma_chan common;		/* DMA common channel */
	struct dma_pool *desc_pool;	/* Descriptors pool */
	struct device *dev;		/* Channel device */
	int irq;			/* Channel IRQ */
	int id;				/* Raw id of this channel */
	struct tasklet_struct tasklet;	/* deferred completion work */
	u32 feature;			/* FSL_DMA_IP_*/endian/ext flags (see defines above) */
	bool idle;			/* DMA controller is idle */
#ifdef CONFIG_PM
	struct fsldma_chan_regs_save regs_save;	/* registers saved over suspend */
	enum fsldma_pm_state pm_state;		/* RUNNING or SUSPENDED */
#endif

	/*
	 * Per-controller operation hooks -- presumably installed at probe
	 * time depending on the IP block; NOTE(review): confirm in fsldma.c
	 * whether any may be left NULL before calling.
	 */
	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};
189
Ira Snydera4f56d42010-01-06 13:34:01 +0000190#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
Zhang Wei173acc72008-03-01 07:42:48 -0700191#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
192#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
193
Peng Maa1ff82a2018-10-30 10:35:59 +0800194#ifdef CONFIG_PPC
195#define fsl_ioread32(p) in_le32(p)
196#define fsl_ioread32be(p) in_be32(p)
197#define fsl_iowrite32(v, p) out_le32(p, v)
198#define fsl_iowrite32be(v, p) out_be32(p, v)
199
Scott Wood6175f6a2018-12-21 22:34:45 -0600200#ifdef __powerpc64__
201#define fsl_ioread64(p) in_le64(p)
202#define fsl_ioread64be(p) in_be64(p)
203#define fsl_iowrite64(v, p) out_le64(p, v)
204#define fsl_iowrite64be(v, p) out_be64(p, v)
205#else
Peng Maa1ff82a2018-10-30 10:35:59 +0800206static u64 fsl_ioread64(const u64 __iomem *addr)
Zhang Wei173acc72008-03-01 07:42:48 -0700207{
Linus Torvalds0a4c56c2020-08-29 13:50:56 -0700208 u32 val_lo = in_le32((u32 __iomem *)addr);
209 u32 val_hi = in_le32((u32 __iomem *)addr + 1);
Peng Maa1ff82a2018-10-30 10:35:59 +0800210
Linus Torvalds0a4c56c2020-08-29 13:50:56 -0700211 return ((u64)val_hi << 32) + val_lo;
Zhang Wei173acc72008-03-01 07:42:48 -0700212}
213
Peng Maa1ff82a2018-10-30 10:35:59 +0800214static void fsl_iowrite64(u64 val, u64 __iomem *addr)
Zhang Wei173acc72008-03-01 07:42:48 -0700215{
Al Viroa4e6d5d2008-03-29 03:10:18 +0000216 out_le32((u32 __iomem *)addr + 1, val >> 32);
217 out_le32((u32 __iomem *)addr, (u32)val);
Zhang Wei173acc72008-03-01 07:42:48 -0700218}
Peng Maa1ff82a2018-10-30 10:35:59 +0800219
220static u64 fsl_ioread64be(const u64 __iomem *addr)
221{
Linus Torvalds0a4c56c2020-08-29 13:50:56 -0700222 u32 val_hi = in_be32((u32 __iomem *)addr);
223 u32 val_lo = in_be32((u32 __iomem *)addr + 1);
Peng Maa1ff82a2018-10-30 10:35:59 +0800224
Linus Torvalds0a4c56c2020-08-29 13:50:56 -0700225 return ((u64)val_hi << 32) + val_lo;
Peng Maa1ff82a2018-10-30 10:35:59 +0800226}
227
228static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
229{
230 out_be32((u32 __iomem *)addr, val >> 32);
231 out_be32((u32 __iomem *)addr + 1, (u32)val);
232}
233#endif
Zhang Wei173acc72008-03-01 07:42:48 -0700234#endif
235
Peng Maa1ff82a2018-10-30 10:35:59 +0800236#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
237#define fsl_ioread32(p) ioread32(p)
238#define fsl_ioread32be(p) ioread32be(p)
239#define fsl_iowrite32(v, p) iowrite32(v, p)
240#define fsl_iowrite32be(v, p) iowrite32be(v, p)
241#define fsl_ioread64(p) ioread64(p)
242#define fsl_ioread64be(p) ioread64be(p)
243#define fsl_iowrite64(v, p) iowrite64(v, p)
244#define fsl_iowrite64be(v, p) iowrite64be(v, p)
245#endif
246
/*
 * Endian-aware register accessors: a channel flagged FSL_DMA_BIG_ENDIAN
 * uses the big-endian accessor, all others the little-endian one.
 * 'width' must be 32 or 64 and selects the fsl_io{read,write}{32,64}[be]
 * helpers defined above.
 */
#define FSL_DMA_IN(fsl_dma, addr, width)			\
		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?	\
			fsl_ioread##width##be(addr) : fsl_ioread##width(addr))

#define FSL_DMA_OUT(fsl_dma, addr, val, width)			\
		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?	\
			fsl_iowrite##width##be(val, addr) : fsl_iowrite	\
			##width(val, addr))
Zhang Wei173acc72008-03-01 07:42:48 -0700255
/*
 * Convert hardware-descriptor fields (the v32/v64 __bitwise values in
 * struct fsl_dma_ld_hw) between the channel's DMA endianness and CPU byte
 * order, keyed off FSL_DMA_BIG_ENDIAN in the channel feature flags.
 */
#define DMA_TO_CPU(fsl_chan, d, width)				\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?	\
			be##width##_to_cpu((__force __be##width)(v##width)d) : \
			le##width##_to_cpu((__force __le##width)(v##width)d))
#define CPU_TO_DMA(fsl_chan, c, width)				\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?	\
			(__force v##width)cpu_to_be##width(c) :	\
			(__force v##width)cpu_to_le##width(c))
Zhang Wei173acc72008-03-01 07:42:48 -0700264
265#endif /* __DMA_FSLDMA_H */