// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car Gen2/Gen3 DMA Controller Driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 * @reserved: padding, hardware descriptors are 16 bytes
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))

/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};

/*
 * struct rcar_dmac_chan_map - Mapping of a slave device physical address to a
 * DMA address
 * @addr: slave DMA address
 * @dir: direction of the mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};

/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @map: slave address mapping currently used by the channel
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @dmac_base: remapped base register block
 * @chan_base: remapped channel register block (optional)
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @channels_mask: bitfield of which DMA channels are managed by this driver
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *dmac_base;
	void __iomem *chan_base;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;
	u32 channels_mask;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

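/* Iterate over the DMAC channels, skipping those not set in channels_mask. */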
#define for_each_rcar_dmac_chan(i, dmac, chan)					\
	for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels;	\
	     i++, chan++)							\
		if (!((dmac)->channels_mask & BIT(i))) continue; else

/*
 * struct rcar_dmac_of_data - This driver's OF data
 * @chan_offset_base: DMAC channels base offset
 * @chan_offset_stride: DMAC channels offset stride
 */
struct rcar_dmac_of_data {
	u32 chan_offset_base;
	u32 chan_offset_stride;
};

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080	/* Not on R-Car V3U */
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
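/*
 * The transfer size (TS) setting is split across two CHCR bit fields, hence
 * the pairs of shifts by 20 and by 3 in the definitions below.
 */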
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* For R-Car V3U */
#define RCAR_V3U_DMACHCLR		0x0100

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

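/*
 * DMAOR and DMARS are 16-bit registers while all other registers are 32 bits
 * wide, hence the mixed readw()/writew() and readl()/writel() accessors below.
 */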
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->dmac_base + reg);
	else
		writel(data, dmac->dmac_base + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->dmac_base + reg);
	else
		return readl(dmac->dmac_base + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

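/*
 * Controllers with a separate channel register block (chan_base set) have a
 * per-channel DMACHCLR register, while the others use a single global
 * DMACHCLR register with one clear bit per channel.
 */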
static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
				 struct rcar_dmac_chan *chan)
{
	if (dmac->chan_base)
		rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
	else
		rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
}

static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
{
	struct rcar_dmac_chan *chan;
	unsigned int i;

	if (dmac->chan_base) {
		for_each_rcar_dmac_chan(i, dmac, chan)
			rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
	} else {
		rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
	}
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at the beginning of the transfer by the
		 * DMAC like it should be. Initialize it manually with the
		 * destination address of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the
		 * end of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_chan_clear_all(dmac);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free lists. The descriptor's chunks list will be reinitialized to an empty
 * list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return desc;
}

/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return chunk;
}

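/*
 * rcar_dmac_realloc_hwdesc - Resize a descriptor's hardware descriptors memory
 * @chan: the DMA channel
 * @desc: the descriptor
 * @size: new size of the hardware descriptors memory, or 0 to free it
 *
 * Frees the current hardware descriptors memory if the (page-aligned) size
 * changes, then allocates a new buffer unless @size is zero. On allocation
 * failure hwdescs.mem is left NULL and hwdescs.size is left at 0.
 */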
static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Stop and reset
 */

static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
{
	u32 chcr;
	unsigned int i;

	/*
	 * Ensure that the DE bit actually reads back as 0 after it has been
	 * cleared, polling for up to about a millisecond (1024 * 1 us).
	 */
	for (i = 0; i < 1024; i++) {
		chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
		if (!(chcr & RCAR_DMACHCR_DE))
			return;
		udelay(1);
	}

	dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}

static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	/* set DE=0 and flush remaining data */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));

	/* make sure all remaining data was flushed */
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
		  RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local lists. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
	struct rcar_dmac_chan *chan;
	unsigned int i;

	/* Stop all channels. */
	for_each_rcar_dmac_chan(i, dmac, chan) {
		/* Stop and reinitialize the channel. */
		spin_lock_irq(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock_irq(&chan->lock);
	}
}

static int rcar_dmac_chan_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_clear_chcr_de(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	/* Indexed by log2 of the transfer size in bytes. */
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src.xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst.xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}

/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning: the
 * SG list describes RAM and the addr variable contains the slave address,
 * e.g. the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM and
 * the SG list contains a single element that points at the source buffer.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool cross_boundary = false;
	unsigned int i;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	u32 high_dev_addr;
	u32 high_mem_addr;
#endif

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

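	/*
	 * TCR counts transfer units, not bytes, so the 24-bit TCR field scaled
	 * by the transfer unit size caps the size of each chunk.
	 */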
	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (i == 0) {
			high_dev_addr = dev_addr >> 32;
			high_mem_addr = mem_addr >> 32;
		}

		if ((dev_addr >> 32 != high_dev_addr) ||
		    (mem_addr >> 32 != high_mem_addr))
			cross_boundary = true;
#endif
		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
				cross_boundary = true;
			}
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
				cross_boundary = true;
			}
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * Hardware descriptors require the source and destination addresses
	 * to be located in the same 4 GiB region of the 40-bit address space;
	 * cross_boundary tracks whether that constraint holds.
	 */
	desc->hwdescs.use = !cross_boundary && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_chan_map *map = &rchan->map;
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/*
	 * Now no new interrupts will occur, but one might already be
	 * running. Wait for it to finish before freeing resources.
	 */
	synchronize_irq(rchan->irq);

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice_init(&rchan->desc.free, &list);
	list_splice_init(&rchan->desc.pending, &list);
	list_splice_init(&rchan->desc.active, &list);
	list_splice_init(&rchan->desc.done, &list);
	list_splice_init(&rchan->desc.wait, &list);

	rchan->desc.running = NULL;

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	/* Remove slave mapping if present. */
	if (map->slave.xfer_size) {
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
		map->slave.xfer_size = 0;
	}

	pm_runtime_put(chan->device->dev);
}

| 1126 | static struct dma_async_tx_descriptor * |
| 1127 | rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, |
| 1128 | dma_addr_t dma_src, size_t len, unsigned long flags) |
| 1129 | { |
| 1130 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1131 | struct scatterlist sgl; |
| 1132 | |
| 1133 | if (!len) |
| 1134 | return NULL; |
| 1135 | |
| 1136 | sg_init_table(&sgl, 1); |
| 1137 | sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len, |
| 1138 | offset_in_page(dma_src)); |
| 1139 | sg_dma_address(&sgl) = dma_src; |
| 1140 | sg_dma_len(&sgl) = len; |
| 1141 | |
| 1142 | return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest, |
| 1143 | DMA_MEM_TO_MEM, flags, false); |
| 1144 | } |
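
/*
 * Usage sketch (not part of this driver): a client reaches the memcpy path
 * above through the generic dmaengine wrapper. The dst_dma/src_dma buffer
 * names below are hypothetical.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */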
| 1145 | |
Niklas Söderlund | 9f87860 | 2016-08-10 13:22:19 +0200 | [diff] [blame] | 1146 | static int rcar_dmac_map_slave_addr(struct dma_chan *chan, |
| 1147 | enum dma_transfer_direction dir) |
| 1148 | { |
| 1149 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1150 | struct rcar_dmac_chan_map *map = &rchan->map; |
| 1151 | phys_addr_t dev_addr; |
| 1152 | size_t dev_size; |
| 1153 | enum dma_data_direction dev_dir; |
| 1154 | |
| 1155 | if (dir == DMA_DEV_TO_MEM) { |
| 1156 | dev_addr = rchan->src.slave_addr; |
| 1157 | dev_size = rchan->src.xfer_size; |
| 1158 | dev_dir = DMA_TO_DEVICE; |
| 1159 | } else { |
| 1160 | dev_addr = rchan->dst.slave_addr; |
| 1161 | dev_size = rchan->dst.xfer_size; |
| 1162 | dev_dir = DMA_FROM_DEVICE; |
| 1163 | } |
| 1164 | |
| 1165 | /* Reuse current map if possible. */ |
| 1166 | if (dev_addr == map->slave.slave_addr && |
| 1167 | dev_size == map->slave.xfer_size && |
| 1168 | dev_dir == map->dir) |
| 1169 | return 0; |
| 1170 | |
| 1171 | /* Remove old mapping if present. */ |
| 1172 | if (map->slave.xfer_size) |
| 1173 | dma_unmap_resource(chan->device->dev, map->addr, |
| 1174 | map->slave.xfer_size, map->dir, 0); |
| 1175 | map->slave.xfer_size = 0; |
| 1176 | |
| 1177 | /* Create new slave address map. */ |
| 1178 | map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, |
| 1179 | dev_dir, 0); |
| 1180 | |
| 1181 | if (dma_mapping_error(chan->device->dev, map->addr)) { |
| 1182 | dev_err(chan->device->dev, |
			"chan%u: failed to map %zx@%pap\n", rchan->index,
| 1184 | dev_size, &dev_addr); |
| 1185 | return -EIO; |
| 1186 | } |
| 1187 | |
| 1188 | dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", |
| 1189 | rchan->index, dev_size, &dev_addr, &map->addr, |
| 1190 | dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE"); |
| 1191 | |
| 1192 | map->slave.slave_addr = dev_addr; |
| 1193 | map->slave.xfer_size = dev_size; |
| 1194 | map->dir = dev_dir; |
| 1195 | |
| 1196 | return 0; |
| 1197 | } |
| 1198 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1199 | static struct dma_async_tx_descriptor * |
| 1200 | rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
| 1201 | unsigned int sg_len, enum dma_transfer_direction dir, |
| 1202 | unsigned long flags, void *context) |
| 1203 | { |
| 1204 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1205 | |
| 1206 | /* Someone calling slave DMA on a generic channel? */ |
Geert Uytterhoeven | 78efb76 | 2019-06-24 14:38:18 +0200 | [diff] [blame] | 1207 | if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1208 | dev_warn(chan->device->dev, |
			 "%s: bad parameter: len=%u, id=%d\n",
| 1210 | __func__, sg_len, rchan->mid_rid); |
| 1211 | return NULL; |
| 1212 | } |
| 1213 | |
Niklas Söderlund | 9f87860 | 2016-08-10 13:22:19 +0200 | [diff] [blame] | 1214 | if (rcar_dmac_map_slave_addr(chan, dir)) |
| 1215 | return NULL; |
| 1216 | |
| 1217 | return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1218 | dir, flags, false); |
| 1219 | } |
| 1220 | |
| 1221 | #define RCAR_DMAC_MAX_SG_LEN 32 |
| 1222 | |
| 1223 | static struct dma_async_tx_descriptor * |
| 1224 | rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, |
| 1225 | size_t buf_len, size_t period_len, |
| 1226 | enum dma_transfer_direction dir, unsigned long flags) |
| 1227 | { |
| 1228 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1229 | struct dma_async_tx_descriptor *desc; |
| 1230 | struct scatterlist *sgl; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1231 | unsigned int sg_len; |
| 1232 | unsigned int i; |
| 1233 | |
| 1234 | /* Someone calling slave DMA on a generic channel? */ |
| 1235 | if (rchan->mid_rid < 0 || buf_len < period_len) { |
| 1236 | dev_warn(chan->device->dev, |
| 1237 | "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", |
| 1238 | __func__, buf_len, period_len, rchan->mid_rid); |
| 1239 | return NULL; |
| 1240 | } |
| 1241 | |
Niklas Söderlund | 9f87860 | 2016-08-10 13:22:19 +0200 | [diff] [blame] | 1242 | if (rcar_dmac_map_slave_addr(chan, dir)) |
| 1243 | return NULL; |
| 1244 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1245 | sg_len = buf_len / period_len; |
| 1246 | if (sg_len > RCAR_DMAC_MAX_SG_LEN) { |
| 1247 | dev_err(chan->device->dev, |
			"chan%u: sg length %u exceeds limit %d\n",
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1249 | rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); |
| 1250 | return NULL; |
| 1251 | } |
| 1252 | |
| 1253 | /* |
| 1254 | * Allocate the sg list dynamically as it would consume too much stack |
| 1255 | * space. |
| 1256 | */ |
Julia Lawall | 7ffd5c8 | 2020-09-20 13:26:21 +0200 | [diff] [blame] | 1257 | sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1258 | if (!sgl) |
| 1259 | return NULL; |
| 1260 | |
| 1261 | sg_init_table(sgl, sg_len); |
| 1262 | |
| 1263 | for (i = 0; i < sg_len; ++i) { |
| 1264 | dma_addr_t src = buf_addr + (period_len * i); |
| 1265 | |
| 1266 | sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, |
| 1267 | offset_in_page(src)); |
| 1268 | sg_dma_address(&sgl[i]) = src; |
| 1269 | sg_dma_len(&sgl[i]) = period_len; |
| 1270 | } |
| 1271 | |
Niklas Söderlund | 9f87860 | 2016-08-10 13:22:19 +0200 | [diff] [blame] | 1272 | desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1273 | dir, flags, true); |
| 1274 | |
| 1275 | kfree(sgl); |
| 1276 | return desc; |
| 1277 | } |
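
/*
 * Usage sketch (not part of this driver): cyclic transfers are typically set
 * up by audio-style clients through the generic wrapper. The buffer names and
 * callback below are hypothetical.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_period_callback;  (invoked once per period)
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */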
| 1278 | |
| 1279 | static int rcar_dmac_device_config(struct dma_chan *chan, |
| 1280 | struct dma_slave_config *cfg) |
| 1281 | { |
| 1282 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1283 | |
| 1284 | /* |
| 1285 | * We could lock this, but you shouldn't be configuring the |
	 * channel while it is in use...
| 1287 | */ |
Niklas Söderlund | c5ed08e | 2016-08-10 13:22:18 +0200 | [diff] [blame] | 1288 | rchan->src.slave_addr = cfg->src_addr; |
| 1289 | rchan->dst.slave_addr = cfg->dst_addr; |
| 1290 | rchan->src.xfer_size = cfg->src_addr_width; |
| 1291 | rchan->dst.xfer_size = cfg->dst_addr_width; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1292 | |
| 1293 | return 0; |
| 1294 | } |
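
/*
 * Usage sketch (not part of this driver): a slave client configures the
 * channel before preparing transfers. The fifo_phys value below is a
 * hypothetical device FIFO address.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */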
| 1295 | |
| 1296 | static int rcar_dmac_chan_terminate_all(struct dma_chan *chan) |
| 1297 | { |
| 1298 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1299 | unsigned long flags; |
| 1300 | |
| 1301 | spin_lock_irqsave(&rchan->lock, flags); |
| 1302 | rcar_dmac_chan_halt(rchan); |
| 1303 | spin_unlock_irqrestore(&rchan->lock, flags); |
| 1304 | |
| 1305 | /* |
| 1306 | * FIXME: No new interrupt can occur now, but the IRQ thread might still |
| 1307 | * be running. |
| 1308 | */ |
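
	/*
	 * Clients that must not race with a still-running IRQ thread can pair
	 * termination with dmaengine_synchronize(), which ends up in
	 * rcar_dmac_device_synchronize() below.
	 */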
| 1309 | |
| 1310 | rcar_dmac_chan_reinit(rchan); |
| 1311 | |
| 1312 | return 0; |
| 1313 | } |
| 1314 | |
| 1315 | static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, |
| 1316 | dma_cookie_t cookie) |
| 1317 | { |
| 1318 | struct rcar_dmac_desc *desc = chan->desc.running; |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1319 | struct rcar_dmac_xfer_chunk *running = NULL; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1320 | struct rcar_dmac_xfer_chunk *chunk; |
Laurent Pinchart | 55bd582 | 2016-06-30 17:15:18 +0200 | [diff] [blame] | 1321 | enum dma_status status; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1322 | unsigned int residue = 0; |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1323 | unsigned int dptr = 0; |
Achim Dahlhoff | 6e7da74 | 2019-04-12 07:29:14 +0200 | [diff] [blame] | 1324 | unsigned int chcrb; |
| 1325 | unsigned int tcrb; |
| 1326 | unsigned int i; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1327 | |
| 1328 | if (!desc) |
| 1329 | return 0; |
| 1330 | |
| 1331 | /* |
Laurent Pinchart | 55bd582 | 2016-06-30 17:15:18 +0200 | [diff] [blame] | 1332 | * If the cookie corresponds to a descriptor that has been completed |
| 1333 | * there is no residue. The same check has already been performed by the |
| 1334 | * caller but without holding the channel lock, so the descriptor could |
| 1335 | * now be complete. |
| 1336 | */ |
| 1337 | status = dma_cookie_status(&chan->chan, cookie, NULL); |
| 1338 | if (status == DMA_COMPLETE) |
| 1339 | return 0; |
| 1340 | |
| 1341 | /* |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1342 | * If the cookie doesn't correspond to the currently running transfer |
| 1343 | * then the descriptor hasn't been processed yet, and the residue is |
| 1344 | * equal to the full descriptor size. |
	 * A client driver may also call this function before
	 * rcar_dmac_isr_channel_thread() has run. In that case "desc.running"
	 * already points to the next descriptor and the completed descriptor
	 * is on the done list, so if the cookie matches a descriptor on the
	 * done list we can assume its residue is zero.
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1350 | */ |
Laurent Pinchart | 55bd582 | 2016-06-30 17:15:18 +0200 | [diff] [blame] | 1351 | if (cookie != desc->async_tx.cookie) { |
Yoshihiro Shimoda | 3e08162 | 2018-02-02 19:05:15 +0900 | [diff] [blame] | 1352 | list_for_each_entry(desc, &chan->desc.done, node) { |
| 1353 | if (cookie == desc->async_tx.cookie) |
| 1354 | return 0; |
| 1355 | } |
Laurent Pinchart | 55bd582 | 2016-06-30 17:15:18 +0200 | [diff] [blame] | 1356 | list_for_each_entry(desc, &chan->desc.pending, node) { |
| 1357 | if (cookie == desc->async_tx.cookie) |
| 1358 | return desc->size; |
| 1359 | } |
| 1360 | list_for_each_entry(desc, &chan->desc.active, node) { |
| 1361 | if (cookie == desc->async_tx.cookie) |
| 1362 | return desc->size; |
| 1363 | } |
| 1364 | |
| 1365 | /* |
| 1366 | * No descriptor found for the cookie, there's thus no residue. |
| 1367 | * This shouldn't happen if the calling driver passes a correct |
| 1368 | * cookie value. |
| 1369 | */ |
| 1370 | WARN(1, "No descriptor for cookie!"); |
| 1371 | return 0; |
| 1372 | } |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1373 | |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1374 | /* |
	 * We need to read two registers consistently. Make sure the descriptor
	 * pointer in the control register does not advance to the next chunk
	 * while we read the counter. Trying three times should be enough:
	 * initial read, retry, and one more retry for the paranoid.
| 1380 | */ |
| 1381 | for (i = 0; i < 3; i++) { |
| 1382 | chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & |
| 1383 | RCAR_DMACHCRB_DPTR_MASK; |
| 1384 | tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB); |
| 1385 | /* Still the same? */ |
| 1386 | if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & |
| 1387 | RCAR_DMACHCRB_DPTR_MASK)) |
| 1388 | break; |
| 1389 | } |
	WARN_ONCE(i >= 3, "residue might not be consistent!");
| 1391 | |
| 1392 | /* |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1393 | * In descriptor mode the descriptor running pointer is not maintained |
| 1394 | * by the interrupt handler, find the running descriptor from the |
| 1395 | * descriptor pointer field in the CHCRB register. In non-descriptor |
| 1396 | * mode just use the running descriptor pointer. |
| 1397 | */ |
Laurent Pinchart | 1ed1315 | 2014-07-19 00:05:14 +0200 | [diff] [blame] | 1398 | if (desc->hwdescs.use) { |
Achim Dahlhoff | 6e7da74 | 2019-04-12 07:29:14 +0200 | [diff] [blame] | 1399 | dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT; |
Kuninori Morimoto | 56b1770 | 2017-05-23 07:08:43 +0000 | [diff] [blame] | 1400 | if (dptr == 0) |
| 1401 | dptr = desc->nchunks; |
| 1402 | dptr--; |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1403 | WARN_ON(dptr >= desc->nchunks); |
| 1404 | } else { |
| 1405 | running = desc->running; |
| 1406 | } |
| 1407 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1408 | /* Compute the size of all chunks still to be transferred. */ |
| 1409 | list_for_each_entry_reverse(chunk, &desc->chunks, node) { |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1410 | if (chunk == running || ++dptr == desc->nchunks) |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1411 | break; |
| 1412 | |
| 1413 | residue += chunk->size; |
| 1414 | } |
| 1415 | |
| 1416 | /* Add the residue for the current chunk. */ |
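	/*
	 * TCRB counts transfer units of (1 << xfer_shift) bytes; e.g. with
	 * xfer_shift == 2 (4-byte units) a TCRB value of 0x10 contributes
	 * 0x40 bytes of residue.
	 */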
Achim Dahlhoff | 6e7da74 | 2019-04-12 07:29:14 +0200 | [diff] [blame] | 1417 | residue += tcrb << desc->xfer_shift; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1418 | |
| 1419 | return residue; |
| 1420 | } |
| 1421 | |
| 1422 | static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, |
| 1423 | dma_cookie_t cookie, |
| 1424 | struct dma_tx_state *txstate) |
| 1425 | { |
| 1426 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1427 | enum dma_status status; |
| 1428 | unsigned long flags; |
| 1429 | unsigned int residue; |
Dirk Behme | 907bd68 | 2019-04-12 07:29:13 +0200 | [diff] [blame] | 1430 | bool cyclic; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1431 | |
| 1432 | status = dma_cookie_status(chan, cookie, txstate); |
| 1433 | if (status == DMA_COMPLETE || !txstate) |
| 1434 | return status; |
| 1435 | |
| 1436 | spin_lock_irqsave(&rchan->lock, flags); |
| 1437 | residue = rcar_dmac_chan_get_residue(rchan, cookie); |
Dirk Behme | 907bd68 | 2019-04-12 07:29:13 +0200 | [diff] [blame] | 1438 | cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1439 | spin_unlock_irqrestore(&rchan->lock, flags); |
| 1440 | |
	/* If there's no residue and the transfer isn't cyclic, the cookie is complete. */
Dirk Behme | 907bd68 | 2019-04-12 07:29:13 +0200 | [diff] [blame] | 1442 | if (!residue && !cyclic) |
Muhammad Hamza Farooq | 3544d28 | 2016-06-30 17:15:15 +0200 | [diff] [blame] | 1443 | return DMA_COMPLETE; |
| 1444 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1445 | dma_set_residue(txstate, residue); |
| 1446 | |
| 1447 | return status; |
| 1448 | } |
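
/*
 * Usage sketch (not part of this driver): a client polls transfer progress
 * with the generic helper, which lands in rcar_dmac_tx_status() above.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		pr_debug("still %u bytes to go\n", state.residue);
 */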
| 1449 | |
| 1450 | static void rcar_dmac_issue_pending(struct dma_chan *chan) |
| 1451 | { |
| 1452 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1453 | unsigned long flags; |
| 1454 | |
| 1455 | spin_lock_irqsave(&rchan->lock, flags); |
| 1456 | |
| 1457 | if (list_empty(&rchan->desc.pending)) |
| 1458 | goto done; |
| 1459 | |
| 1460 | /* Append the pending list to the active list. */ |
| 1461 | list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); |
| 1462 | |
| 1463 | /* |
| 1464 | * If no transfer is running pick the first descriptor from the active |
| 1465 | * list and start the transfer. |
| 1466 | */ |
| 1467 | if (!rchan->desc.running) { |
| 1468 | struct rcar_dmac_desc *desc; |
| 1469 | |
| 1470 | desc = list_first_entry(&rchan->desc.active, |
| 1471 | struct rcar_dmac_desc, node); |
| 1472 | rchan->desc.running = desc; |
| 1473 | |
| 1474 | rcar_dmac_chan_start_xfer(rchan); |
| 1475 | } |
| 1476 | |
| 1477 | done: |
| 1478 | spin_unlock_irqrestore(&rchan->lock, flags); |
| 1479 | } |
| 1480 | |
Niklas Söderlund | 30c4500 | 2017-05-16 01:09:16 +0200 | [diff] [blame] | 1481 | static void rcar_dmac_device_synchronize(struct dma_chan *chan) |
| 1482 | { |
| 1483 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
| 1484 | |
| 1485 | synchronize_irq(rchan->irq); |
| 1486 | } |
| 1487 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1488 | /* ----------------------------------------------------------------------------- |
| 1489 | * IRQ handling |
| 1490 | */ |
| 1491 | |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1492 | static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan) |
| 1493 | { |
| 1494 | struct rcar_dmac_desc *desc = chan->desc.running; |
| 1495 | unsigned int stage; |
| 1496 | |
| 1497 | if (WARN_ON(!desc || !desc->cyclic)) { |
| 1498 | /* |
| 1499 | * This should never happen, there should always be a running |
| 1500 | * cyclic descriptor when a descriptor stage end interrupt is |
| 1501 | * triggered. Warn and return. |
| 1502 | */ |
| 1503 | return IRQ_NONE; |
| 1504 | } |
| 1505 | |
| 1506 | /* Program the interrupt pointer to the next stage. */ |
| 1507 | stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & |
| 1508 | RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; |
| 1509 | rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage)); |
| 1510 | |
| 1511 | return IRQ_WAKE_THREAD; |
| 1512 | } |
| 1513 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1514 | static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan) |
| 1515 | { |
| 1516 | struct rcar_dmac_desc *desc = chan->desc.running; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1517 | irqreturn_t ret = IRQ_WAKE_THREAD; |
| 1518 | |
| 1519 | if (WARN_ON_ONCE(!desc)) { |
| 1520 | /* |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1521 | * This should never happen, there should always be a running |
| 1522 | * descriptor when a transfer end interrupt is triggered. Warn |
| 1523 | * and return. |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1524 | */ |
| 1525 | return IRQ_NONE; |
| 1526 | } |
| 1527 | |
| 1528 | /* |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1529 | * The transfer end interrupt isn't generated for each chunk when using |
| 1530 | * descriptor mode. Only update the running chunk pointer in |
| 1531 | * non-descriptor mode. |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1532 | */ |
Laurent Pinchart | 1ed1315 | 2014-07-19 00:05:14 +0200 | [diff] [blame] | 1533 | if (!desc->hwdescs.use) { |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1534 | /* |
		 * If we haven't completed the last transfer chunk, simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
| 1537 | * cyclic. |
| 1538 | */ |
| 1539 | if (!list_is_last(&desc->running->node, &desc->chunks)) { |
| 1540 | desc->running = list_next_entry(desc->running, node); |
| 1541 | if (!desc->cyclic) |
| 1542 | ret = IRQ_HANDLED; |
| 1543 | goto done; |
| 1544 | } |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1545 | |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1546 | /* |
| 1547 | * We've completed the last transfer chunk. If the transfer is |
| 1548 | * cyclic, move back to the first one. |
| 1549 | */ |
| 1550 | if (desc->cyclic) { |
| 1551 | desc->running = |
| 1552 | list_first_entry(&desc->chunks, |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1553 | struct rcar_dmac_xfer_chunk, |
| 1554 | node); |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1555 | goto done; |
| 1556 | } |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1557 | } |
| 1558 | |
| 1559 | /* The descriptor is complete, move it to the done list. */ |
| 1560 | list_move_tail(&desc->node, &chan->desc.done); |
| 1561 | |
| 1562 | /* Queue the next descriptor, if any. */ |
| 1563 | if (!list_empty(&chan->desc.active)) |
| 1564 | chan->desc.running = list_first_entry(&chan->desc.active, |
| 1565 | struct rcar_dmac_desc, |
| 1566 | node); |
| 1567 | else |
| 1568 | chan->desc.running = NULL; |
| 1569 | |
| 1570 | done: |
| 1571 | if (chan->desc.running) |
| 1572 | rcar_dmac_chan_start_xfer(chan); |
| 1573 | |
| 1574 | return ret; |
| 1575 | } |
| 1576 | |
| 1577 | static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) |
| 1578 | { |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1579 | u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1580 | struct rcar_dmac_chan *chan = dev; |
| 1581 | irqreturn_t ret = IRQ_NONE; |
Kuninori Morimoto | 9203dbe | 2018-06-15 00:53:33 +0000 | [diff] [blame] | 1582 | bool reinit = false; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1583 | u32 chcr; |
| 1584 | |
| 1585 | spin_lock(&chan->lock); |
| 1586 | |
| 1587 | chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); |
Kuninori Morimoto | 9203dbe | 2018-06-15 00:53:33 +0000 | [diff] [blame] | 1588 | if (chcr & RCAR_DMACHCR_CAE) { |
Kuninori Morimoto | e919417 | 2018-07-03 00:29:29 +0000 | [diff] [blame] | 1589 | struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device); |
| 1590 | |
| 1591 | /* |
		 * We don't need to call rcar_dmac_chan_halt() because the
		 * channel is already stopped in the error case. Clear the
		 * register and check the DE bit as the recovery path.
| 1595 | */ |
Geert Uytterhoeven | 245bbd1 | 2021-01-28 09:44:54 +0100 | [diff] [blame] | 1596 | rcar_dmac_chan_clear(dmac, chan); |
Kuninori Morimoto | e919417 | 2018-07-03 00:29:29 +0000 | [diff] [blame] | 1597 | rcar_dmac_chcr_de_barrier(chan); |
Kuninori Morimoto | 9203dbe | 2018-06-15 00:53:33 +0000 | [diff] [blame] | 1598 | reinit = true; |
| 1599 | goto spin_lock_end; |
| 1600 | } |
| 1601 | |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1602 | if (chcr & RCAR_DMACHCR_TE) |
| 1603 | mask |= RCAR_DMACHCR_DE; |
| 1604 | rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); |
Kuninori Morimoto | a8d46a7 | 2017-11-17 11:00:28 +0900 | [diff] [blame] | 1605 | if (mask & RCAR_DMACHCR_DE) |
| 1606 | rcar_dmac_chcr_de_barrier(chan); |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1607 | |
| 1608 | if (chcr & RCAR_DMACHCR_DSE) |
| 1609 | ret |= rcar_dmac_isr_desc_stage_end(chan); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1610 | |
| 1611 | if (chcr & RCAR_DMACHCR_TE) |
| 1612 | ret |= rcar_dmac_isr_transfer_end(chan); |
| 1613 | |
Kuninori Morimoto | 9203dbe | 2018-06-15 00:53:33 +0000 | [diff] [blame] | 1614 | spin_lock_end: |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1615 | spin_unlock(&chan->lock); |
| 1616 | |
Kuninori Morimoto | 9203dbe | 2018-06-15 00:53:33 +0000 | [diff] [blame] | 1617 | if (reinit) { |
| 1618 | dev_err(chan->chan.device->dev, "Channel Address Error\n"); |
| 1619 | |
| 1620 | rcar_dmac_chan_reinit(chan); |
| 1621 | ret = IRQ_HANDLED; |
| 1622 | } |
| 1623 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1624 | return ret; |
| 1625 | } |
| 1626 | |
| 1627 | static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) |
| 1628 | { |
| 1629 | struct rcar_dmac_chan *chan = dev; |
| 1630 | struct rcar_dmac_desc *desc; |
Dave Jiang | 964b2fd | 2016-07-20 13:12:53 -0700 | [diff] [blame] | 1631 | struct dmaengine_desc_callback cb; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1632 | |
| 1633 | spin_lock_irq(&chan->lock); |
| 1634 | |
| 1635 | /* For cyclic transfers notify the user after every chunk. */ |
| 1636 | if (chan->desc.running && chan->desc.running->cyclic) { |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1637 | desc = chan->desc.running; |
Dave Jiang | 964b2fd | 2016-07-20 13:12:53 -0700 | [diff] [blame] | 1638 | dmaengine_desc_get_callback(&desc->async_tx, &cb); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1639 | |
Dave Jiang | 964b2fd | 2016-07-20 13:12:53 -0700 | [diff] [blame] | 1640 | if (dmaengine_desc_callback_valid(&cb)) { |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1641 | spin_unlock_irq(&chan->lock); |
Dave Jiang | 964b2fd | 2016-07-20 13:12:53 -0700 | [diff] [blame] | 1642 | dmaengine_desc_callback_invoke(&cb, NULL); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1643 | spin_lock_irq(&chan->lock); |
| 1644 | } |
| 1645 | } |
| 1646 | |
| 1647 | /* |
| 1648 | * Call the callback function for all descriptors on the done list and |
| 1649 | * move them to the ack wait list. |
| 1650 | */ |
| 1651 | while (!list_empty(&chan->desc.done)) { |
| 1652 | desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, |
| 1653 | node); |
| 1654 | dma_cookie_complete(&desc->async_tx); |
| 1655 | list_del(&desc->node); |
| 1656 | |
Dave Jiang | 964b2fd | 2016-07-20 13:12:53 -0700 | [diff] [blame] | 1657 | dmaengine_desc_get_callback(&desc->async_tx, &cb); |
| 1658 | if (dmaengine_desc_callback_valid(&cb)) { |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1659 | spin_unlock_irq(&chan->lock); |
| 1660 | /* |
| 1661 | * We own the only reference to this descriptor, we can |
| 1662 | * safely dereference it without holding the channel |
| 1663 | * lock. |
| 1664 | */ |
Dave Jiang | 964b2fd | 2016-07-20 13:12:53 -0700 | [diff] [blame] | 1665 | dmaengine_desc_callback_invoke(&cb, NULL); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1666 | spin_lock_irq(&chan->lock); |
| 1667 | } |
| 1668 | |
| 1669 | list_add_tail(&desc->node, &chan->desc.wait); |
| 1670 | } |
| 1671 | |
Laurent Pinchart | ccadee9 | 2014-07-16 23:15:48 +0200 | [diff] [blame] | 1672 | spin_unlock_irq(&chan->lock); |
| 1673 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1674 | /* Recycle all acked descriptors. */ |
| 1675 | rcar_dmac_desc_recycle_acked(chan); |
| 1676 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1677 | return IRQ_HANDLED; |
| 1678 | } |
| 1679 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1680 | /* ----------------------------------------------------------------------------- |
| 1681 | * OF xlate and channel filter |
| 1682 | */ |
| 1683 | |
| 1684 | static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) |
| 1685 | { |
| 1686 | struct rcar_dmac *dmac = to_rcar_dmac(chan->device); |
| 1687 | struct of_phandle_args *dma_spec = arg; |
| 1688 | |
| 1689 | /* |
	 * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
	 * function knows which device it wants to allocate a channel from,
| 1692 | * and would be perfectly capable of selecting the channel it wants. |
| 1693 | * Forcing it to call dma_request_channel() and iterate through all |
| 1694 | * channels from all controllers is just pointless. |
| 1695 | */ |
Baolin Wang | 1dc1b29 | 2019-05-20 19:32:20 +0800 | [diff] [blame] | 1696 | if (chan->device->device_config != rcar_dmac_device_config) |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1697 | return false; |
| 1698 | |
| 1699 | return !test_and_set_bit(dma_spec->args[0], dmac->modules); |
| 1700 | } |
| 1701 | |
| 1702 | static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, |
| 1703 | struct of_dma *ofdma) |
| 1704 | { |
| 1705 | struct rcar_dmac_chan *rchan; |
| 1706 | struct dma_chan *chan; |
| 1707 | dma_cap_mask_t mask; |
| 1708 | |
| 1709 | if (dma_spec->args_count != 1) |
| 1710 | return NULL; |
| 1711 | |
| 1712 | /* Only slave DMA channels can be allocated via DT */ |
| 1713 | dma_cap_zero(mask); |
| 1714 | dma_cap_set(DMA_SLAVE, mask); |
| 1715 | |
Baolin Wang | 1dc1b29 | 2019-05-20 19:32:20 +0800 | [diff] [blame] | 1716 | chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec, |
| 1717 | ofdma->of_node); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1718 | if (!chan) |
| 1719 | return NULL; |
| 1720 | |
| 1721 | rchan = to_rcar_dmac_chan(chan); |
| 1722 | rchan->mid_rid = dma_spec->args[0]; |
| 1723 | |
| 1724 | return chan; |
| 1725 | } |
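
/*
 * Clients normally reach rcar_dmac_of_xlate() through the standard request
 * helper, with the MID/RID value taken from the "dmas" specifier in the
 * client's device tree node, e.g. (sketch, "tx" being a hypothetical
 * dma-names entry):
 *
 *	chan = dma_request_chan(dev, "tx");
 */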
| 1726 | |
| 1727 | /* ----------------------------------------------------------------------------- |
| 1728 | * Power management |
| 1729 | */ |
| 1730 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1731 | #ifdef CONFIG_PM |
| 1732 | static int rcar_dmac_runtime_suspend(struct device *dev) |
| 1733 | { |
| 1734 | return 0; |
| 1735 | } |
| 1736 | |
| 1737 | static int rcar_dmac_runtime_resume(struct device *dev) |
| 1738 | { |
| 1739 | struct rcar_dmac *dmac = dev_get_drvdata(dev); |
| 1740 | |
| 1741 | return rcar_dmac_init(dmac); |
| 1742 | } |
| 1743 | #endif |
| 1744 | |
| 1745 | static const struct dev_pm_ops rcar_dmac_pm = { |
Geert Uytterhoeven | 1131b0a | 2018-01-17 10:38:28 +0100 | [diff] [blame] | 1746 | /* |
| 1747 | * TODO for system sleep/resume: |
| 1748 | * - Wait for the current transfer to complete and stop the device, |
| 1749 | * - Resume transfers, if any. |
| 1750 | */ |
Geert Uytterhoeven | 73dcc66 | 2018-03-29 18:53:32 +0200 | [diff] [blame] | 1751 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
| 1752 | pm_runtime_force_resume) |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1753 | SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, |
| 1754 | NULL) |
| 1755 | }; |
| 1756 | |
| 1757 | /* ----------------------------------------------------------------------------- |
| 1758 | * Probe and remove |
| 1759 | */ |
| 1760 | |
| 1761 | static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 1762 | struct rcar_dmac_chan *rchan) |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1763 | { |
| 1764 | struct platform_device *pdev = to_platform_device(dmac->dev); |
| 1765 | struct dma_chan *chan = &rchan->chan; |
| 1766 | char pdev_irqname[5]; |
| 1767 | char *irqname; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1768 | int ret; |
| 1769 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1770 | rchan->mid_rid = -EINVAL; |
| 1771 | |
| 1772 | spin_lock_init(&rchan->lock); |
| 1773 | |
Laurent Pinchart | f7638c9 | 2015-01-27 15:58:53 +0200 | [diff] [blame] | 1774 | INIT_LIST_HEAD(&rchan->desc.free); |
| 1775 | INIT_LIST_HEAD(&rchan->desc.pending); |
| 1776 | INIT_LIST_HEAD(&rchan->desc.active); |
| 1777 | INIT_LIST_HEAD(&rchan->desc.done); |
| 1778 | INIT_LIST_HEAD(&rchan->desc.wait); |
| 1779 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1780 | /* Request the channel interrupt. */ |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 1781 | sprintf(pdev_irqname, "ch%u", rchan->index); |
Niklas Söderlund | 427d5ec | 2017-05-16 01:09:15 +0200 | [diff] [blame] | 1782 | rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); |
Stephen Boyd | e17be6e | 2019-07-30 11:15:10 -0700 | [diff] [blame] | 1783 | if (rchan->irq < 0) |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1784 | return -ENODEV; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1785 | |
| 1786 | irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 1787 | dev_name(dmac->dev), rchan->index); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1788 | if (!irqname) |
| 1789 | return -ENOMEM; |
| 1790 | |
Kuninori Morimoto | 5e85704 | 2017-08-21 06:31:57 +0000 | [diff] [blame] | 1791 | /* |
| 1792 | * Initialize the DMA engine channel and add it to the DMA engine |
| 1793 | * channels list. |
| 1794 | */ |
| 1795 | chan->device = &dmac->engine; |
| 1796 | dma_cookie_init(chan); |
| 1797 | |
| 1798 | list_add_tail(&chan->device_node, &dmac->engine.channels); |
| 1799 | |
Niklas Söderlund | 427d5ec | 2017-05-16 01:09:15 +0200 | [diff] [blame] | 1800 | ret = devm_request_threaded_irq(dmac->dev, rchan->irq, |
| 1801 | rcar_dmac_isr_channel, |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1802 | rcar_dmac_isr_channel_thread, 0, |
| 1803 | irqname, rchan); |
| 1804 | if (ret) { |
Niklas Söderlund | 427d5ec | 2017-05-16 01:09:15 +0200 | [diff] [blame] | 1805 | dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", |
| 1806 | rchan->irq, ret); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1807 | return ret; |
| 1808 | } |
| 1809 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1810 | return 0; |
| 1811 | } |
| 1812 | |
Yoshihiro Shimoda | cf24aac | 2019-09-02 20:44:03 +0900 | [diff] [blame] | 1813 | #define RCAR_DMAC_MAX_CHANNELS 32 |
| 1814 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1815 | static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) |
| 1816 | { |
| 1817 | struct device_node *np = dev->of_node; |
| 1818 | int ret; |
| 1819 | |
| 1820 | ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); |
| 1821 | if (ret < 0) { |
| 1822 | dev_err(dev, "unable to read dma-channels property\n"); |
| 1823 | return ret; |
| 1824 | } |
| 1825 | |
Yoshihiro Shimoda | cf24aac | 2019-09-02 20:44:03 +0900 | [diff] [blame] | 1826 | /* The hardware and driver don't support more than 32 bits in CHCLR */ |
| 1827 | if (dmac->n_channels <= 0 || |
| 1828 | dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1829 | dev_err(dev, "invalid number of channels %u\n", |
| 1830 | dmac->n_channels); |
| 1831 | return -EINVAL; |
| 1832 | } |
| 1833 | |
Yoshihiro Shimoda | fcf8adb | 2019-09-09 15:34:52 +0900 | [diff] [blame] | 1834 | /* |
	 * If the dma-channel-mask property cannot be read, assume that all
	 * channels are usable.
| 1837 | */ |
Yoshihiro Shimoda | cf24aac | 2019-09-02 20:44:03 +0900 | [diff] [blame] | 1838 | dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); |
Yoshihiro Shimoda | fcf8adb | 2019-09-09 15:34:52 +0900 | [diff] [blame] | 1839 | of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask); |
| 1840 | |
	/*
	 * Clear any mask bits that refer to nonexistent channels; e.g. with
	 * n_channels == 16 the usable mask is GENMASK(15, 0) == 0xffff.
	 */
| 1842 | dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0); |
Yoshihiro Shimoda | cf24aac | 2019-09-02 20:44:03 +0900 | [diff] [blame] | 1843 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1844 | return 0; |
| 1845 | } |
| 1846 | |
| 1847 | static int rcar_dmac_probe(struct platform_device *pdev) |
| 1848 | { |
| 1849 | const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | |
| 1850 | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | |
| 1851 | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | |
| 1852 | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; |
Geert Uytterhoeven | d249b5f | 2021-01-28 09:44:53 +0100 | [diff] [blame] | 1853 | const struct rcar_dmac_of_data *data; |
| 1854 | struct rcar_dmac_chan *chan; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1855 | struct dma_device *engine; |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 1856 | void __iomem *chan_base; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1857 | struct rcar_dmac *dmac; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1858 | unsigned int i; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1859 | int ret; |
| 1860 | |
Yoshihiro Shimoda | 2df4a02 | 2019-09-09 15:34:50 +0900 | [diff] [blame] | 1861 | data = of_device_get_match_data(&pdev->dev); |
| 1862 | if (!data) |
| 1863 | return -EINVAL; |
| 1864 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1865 | dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); |
| 1866 | if (!dmac) |
| 1867 | return -ENOMEM; |
| 1868 | |
| 1869 | dmac->dev = &pdev->dev; |
| 1870 | platform_set_drvdata(pdev, dmac); |
Wolfram Sang | 97d49c5 | 2018-09-14 17:43:28 +0200 | [diff] [blame] | 1871 | dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); |
Geert Uytterhoeven | dc31234 | 2017-02-13 12:00:26 +0100 | [diff] [blame] | 1872 | dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1873 | |
| 1874 | ret = rcar_dmac_parse_of(&pdev->dev, dmac); |
| 1875 | if (ret < 0) |
| 1876 | return ret; |
| 1877 | |
Laurent Pinchart | be6893e | 2015-01-27 19:04:10 +0200 | [diff] [blame] | 1878 | /* |
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
	 * being flushed correctly, resulting in memory corruption. DMAC 0 channel 0
| 1881 | * is connected to microTLB 0 on currently supported platforms, so we |
| 1882 | * can't use it with the IPMMU. As the IOMMU API operates at the device |
| 1883 | * level we can't disable it selectively, so ignore channel 0 for now if |
| 1884 | * the device is part of an IOMMU group. |
| 1885 | */ |
Yoshihiro Shimoda | cf24aac | 2019-09-02 20:44:03 +0900 | [diff] [blame] | 1886 | if (device_iommu_mapped(&pdev->dev)) |
| 1887 | dmac->channels_mask &= ~BIT(0); |
Laurent Pinchart | be6893e | 2015-01-27 19:04:10 +0200 | [diff] [blame] | 1888 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1889 | dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, |
| 1890 | sizeof(*dmac->channels), GFP_KERNEL); |
| 1891 | if (!dmac->channels) |
| 1892 | return -ENOMEM; |
| 1893 | |
| 1894 | /* Request resources. */ |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 1895 | dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0); |
| 1896 | if (IS_ERR(dmac->dmac_base)) |
| 1897 | return PTR_ERR(dmac->dmac_base); |
| 1898 | |
| 1899 | if (!data->chan_offset_base) { |
| 1900 | dmac->chan_base = devm_platform_ioremap_resource(pdev, 1); |
| 1901 | if (IS_ERR(dmac->chan_base)) |
| 1902 | return PTR_ERR(dmac->chan_base); |
| 1903 | |
| 1904 | chan_base = dmac->chan_base; |
| 1905 | } else { |
| 1906 | chan_base = dmac->dmac_base + data->chan_offset_base; |
| 1907 | } |
| 1908 | |
| 1909 | for_each_rcar_dmac_chan(i, dmac, chan) { |
| 1910 | chan->index = i; |
| 1911 | chan->iomem = chan_base + i * data->chan_offset_stride; |
| 1912 | } |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1913 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1914 | /* Enable runtime PM and initialize the device. */ |
| 1915 | pm_runtime_enable(&pdev->dev); |
Zou Wei | dea8464 | 2021-05-31 14:36:03 +0800 | [diff] [blame] | 1916 | ret = pm_runtime_resume_and_get(&pdev->dev); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1917 | if (ret < 0) { |
| 1918 | dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); |
Dongliang Mu | 05f4fae | 2021-10-20 22:35:33 +0800 | [diff] [blame] | 1919 | goto err_pm_disable; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1920 | } |
| 1921 | |
| 1922 | ret = rcar_dmac_init(dmac); |
| 1923 | pm_runtime_put(&pdev->dev); |
| 1924 | |
| 1925 | if (ret) { |
| 1926 | dev_err(&pdev->dev, "failed to reset device\n"); |
Dongliang Mu | 05f4fae | 2021-10-20 22:35:33 +0800 | [diff] [blame] | 1927 | goto err_pm_disable; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1928 | } |
| 1929 | |
Kuninori Morimoto | 5e85704 | 2017-08-21 06:31:57 +0000 | [diff] [blame] | 1930 | /* Initialize engine */ |
| 1931 | engine = &dmac->engine; |
| 1932 | |
| 1933 | dma_cap_set(DMA_MEMCPY, engine->cap_mask); |
| 1934 | dma_cap_set(DMA_SLAVE, engine->cap_mask); |
| 1935 | |
| 1936 | engine->dev = &pdev->dev; |
| 1937 | engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); |
| 1938 | |
| 1939 | engine->src_addr_widths = widths; |
| 1940 | engine->dst_addr_widths = widths; |
| 1941 | engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); |
| 1942 | engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
| 1943 | |
| 1944 | engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; |
| 1945 | engine->device_free_chan_resources = rcar_dmac_free_chan_resources; |
| 1946 | engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; |
| 1947 | engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; |
| 1948 | engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; |
| 1949 | engine->device_config = rcar_dmac_device_config; |
Yoshihiro Shimoda | 8115ce7 | 2018-07-11 11:10:16 +0900 | [diff] [blame] | 1950 | engine->device_pause = rcar_dmac_chan_pause; |
Kuninori Morimoto | 5e85704 | 2017-08-21 06:31:57 +0000 | [diff] [blame] | 1951 | engine->device_terminate_all = rcar_dmac_chan_terminate_all; |
| 1952 | engine->device_tx_status = rcar_dmac_tx_status; |
| 1953 | engine->device_issue_pending = rcar_dmac_issue_pending; |
| 1954 | engine->device_synchronize = rcar_dmac_device_synchronize; |
| 1955 | |
| 1956 | INIT_LIST_HEAD(&engine->channels); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1957 | |
Geert Uytterhoeven | d249b5f | 2021-01-28 09:44:53 +0100 | [diff] [blame] | 1958 | for_each_rcar_dmac_chan(i, dmac, chan) { |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 1959 | ret = rcar_dmac_chan_probe(dmac, chan); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1960 | if (ret < 0) |
Dongliang Mu | 05f4fae | 2021-10-20 22:35:33 +0800 | [diff] [blame] | 1961 | goto err_pm_disable; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1962 | } |
| 1963 | |
| 1964 | /* Register the DMAC as a DMA provider for DT. */ |
| 1965 | ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, |
| 1966 | NULL); |
| 1967 | if (ret < 0) |
Dongliang Mu | 05f4fae | 2021-10-20 22:35:33 +0800 | [diff] [blame] | 1968 | goto err_pm_disable; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1969 | |
| 1970 | /* |
| 1971 | * Register the DMA engine device. |
| 1972 | * |
| 1973 | * Default transfer size of 32 bytes requires 32-byte alignment. |
| 1974 | */ |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1975 | ret = dma_async_device_register(engine); |
| 1976 | if (ret < 0) |
Dongliang Mu | 05f4fae | 2021-10-20 22:35:33 +0800 | [diff] [blame] | 1977 | goto err_dma_free; |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1978 | |
| 1979 | return 0; |
| 1980 | |
Dongliang Mu | 05f4fae | 2021-10-20 22:35:33 +0800 | [diff] [blame] | 1981 | err_dma_free: |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1982 | of_dma_controller_free(pdev->dev.of_node); |
Dongliang Mu | 05f4fae | 2021-10-20 22:35:33 +0800 | [diff] [blame] | 1983 | err_pm_disable: |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 1984 | pm_runtime_disable(&pdev->dev); |
| 1985 | return ret; |
| 1986 | } |
| 1987 | |
| 1988 | static int rcar_dmac_remove(struct platform_device *pdev) |
| 1989 | { |
| 1990 | struct rcar_dmac *dmac = platform_get_drvdata(pdev); |
| 1991 | |
| 1992 | of_dma_controller_free(pdev->dev.of_node); |
| 1993 | dma_async_device_unregister(&dmac->engine); |
| 1994 | |
| 1995 | pm_runtime_disable(&pdev->dev); |
| 1996 | |
| 1997 | return 0; |
| 1998 | } |
| 1999 | |
| 2000 | static void rcar_dmac_shutdown(struct platform_device *pdev) |
| 2001 | { |
| 2002 | struct rcar_dmac *dmac = platform_get_drvdata(pdev); |
| 2003 | |
Kuninori Morimoto | 9203dbe | 2018-06-15 00:53:33 +0000 | [diff] [blame] | 2004 | rcar_dmac_stop_all_chan(dmac); |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 2005 | } |
| 2006 | |
Yoshihiro Shimoda | 2df4a02 | 2019-09-09 15:34:50 +0900 | [diff] [blame] | 2007 | static const struct rcar_dmac_of_data rcar_dmac_data = { |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 2008 | .chan_offset_base = 0x8000, |
| 2009 | .chan_offset_stride = 0x80, |
| 2010 | }; |
| 2011 | |
| 2012 | static const struct rcar_dmac_of_data rcar_v3u_dmac_data = { |
| 2013 | .chan_offset_base = 0x0, |
| 2014 | .chan_offset_stride = 0x1000, |
Yoshihiro Shimoda | 2df4a02 | 2019-09-09 15:34:50 +0900 | [diff] [blame] | 2015 | }; |
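
/*
 * Example address math: with rcar_dmac_data, channel 3 registers start at
 * dmac_base + 0x8000 + 3 * 0x80 = dmac_base + 0x8180; with
 * rcar_v3u_dmac_data they start 3 * 0x1000 into the separate channel
 * register block mapped from the second MMIO resource.
 */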
| 2016 | |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 2017 | static const struct of_device_id rcar_dmac_of_ids[] = { |
Yoshihiro Shimoda | 2df4a02 | 2019-09-09 15:34:50 +0900 | [diff] [blame] | 2018 | { |
| 2019 | .compatible = "renesas,rcar-dmac", |
| 2020 | .data = &rcar_dmac_data, |
Geert Uytterhoeven | e5bfbbb | 2021-01-28 09:44:55 +0100 | [diff] [blame] | 2021 | }, { |
| 2022 | .compatible = "renesas,dmac-r8a779a0", |
| 2023 | .data = &rcar_v3u_dmac_data, |
Yoshihiro Shimoda | 2df4a02 | 2019-09-09 15:34:50 +0900 | [diff] [blame] | 2024 | }, |
Laurent Pinchart | 87244fe | 2014-07-09 00:42:19 +0200 | [diff] [blame] | 2025 | { /* Sentinel */ } |
| 2026 | }; |
| 2027 | MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids); |
| 2028 | |
| 2029 | static struct platform_driver rcar_dmac_driver = { |
| 2030 | .driver = { |
| 2031 | .pm = &rcar_dmac_pm, |
| 2032 | .name = "rcar-dmac", |
| 2033 | .of_match_table = rcar_dmac_of_ids, |
| 2034 | }, |
| 2035 | .probe = rcar_dmac_probe, |
| 2036 | .remove = rcar_dmac_remove, |
| 2037 | .shutdown = rcar_dmac_shutdown, |
| 2038 | }; |
| 2039 | |
| 2040 | module_platform_driver(rcar_dmac_driver); |
| 2041 | |
MODULE_DESCRIPTION("Renesas R-Car Gen2/Gen3 DMA Controller Driver");
| 2043 | MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); |
| 2044 | MODULE_LICENSE("GPL v2"); |