// SPDX-License-Identifier: GPL-2.0-only
/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR 0x34
#define TIMBDMA_32BIT_ADDR 0x01

#define TIMBDMA_ISR 0x080000
#define TIMBDMA_IPR 0x080004
#define TIMBDMA_IER 0x080008

/* Channel specific registers */
/* RX instance base addresses are 0x00, 0x40, 0x80 ...
 * TX instance base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET 0x40
#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
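/* For example, with the even-RX/odd-TX channel numbering used by this
 * driver, channel 3 is the TX half of instance 1 and maps to
 * 1 * TIMBDMA_INSTANCE_OFFSET + TIMBDMA_INSTANCE_TX_OFFSET = 0x58,
 * matching the membase computation in td_probe().
 */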

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR 0x00
#define TIMBDMA_OFFS_RX_DLAR 0x04
#define TIMBDMA_OFFS_RX_LR 0x0C
#define TIMBDMA_OFFS_RX_BLR 0x10
#define TIMBDMA_OFFS_RX_ER 0x14
#define TIMBDMA_RX_EN 0x01
/* Bytes per row: a video-specific register
 * placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR 0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR 0x00
#define TIMBDMA_OFFS_TX_DLAR 0x04
#define TIMBDMA_OFFS_TX_BLR 0x0C
#define TIMBDMA_OFFS_TX_LR 0x14

#define TIMB_DMA_DESC_SIZE 8

struct timb_dma_desc {
	struct list_head desc_node;
	struct dma_async_tx_descriptor txd;
	u8 *desc_list;
	unsigned int desc_list_len;
	bool interrupt;
};

struct timb_dma_chan {
	struct dma_chan chan;
	void __iomem *membase;
	spinlock_t lock; /* Used to protect data structures,
			    especially the lists and descriptors,
			    from races between the tasklet and calls
			    from above */
	bool ongoing;
	struct list_head active_list;
	struct list_head queue;
	struct list_head free_list;
	unsigned int bytes_per_line;
	enum dma_transfer_direction direction;
	unsigned int descs; /* Descriptors to allocate */
	unsigned int desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device dma;
	void __iomem *membase;
	struct tasklet_struct tasklet;
	struct timb_dma_chan channels[];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

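/*
 * Recover the containing struct timb_dma from a channel: channels[] is the
 * flexible array at the end of struct timb_dma, so stepping back over the
 * preceding channel entries and the struct header yields the device.
 */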
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;

	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Must be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

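/*
 * Hardware descriptor layout as built by td_fill_desc() below
 * (TIMB_DMA_DESC_SIZE = 8 bytes, multi-byte fields little-endian):
 *
 *   byte 0:    control: 0x21 ("tran, valid"), plus 0x02 for the last element
 *   byte 1:    reserved, written as zero
 *   bytes 2-3: transfer length in bytes (hence the USHRT_MAX limit)
 *   bytes 4-7: DMA bus address of the data buffer
 *
 * For example, a 0x100-byte element at bus address 0x12345678 that ends the
 * list would be encoded as 23 00 00 01 78 56 34 12.
 */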
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "sg element too big\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {
		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	dmaengine_desc_get_callback(txd, &cb);

	list_move(&td_desc->desc_node, &td_chan->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;

		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc)
		goto out;

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list)
		goto err;

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);

		if (!td_desc) {
			if (i)
				break;

			dev_err(chan2dev(chan),
				"Couldn't allocate any descriptors\n");
			return -ENOMEM;
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;

		/* make sure the next element still fits in the list */
		if (desc_usage + TIMB_DMA_DESC_SIZE > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}
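
/*
 * A minimal client-side sketch (illustrative only, not part of this driver)
 * of exercising such a channel through the generic dmaengine API; the filter
 * callback, its parameter and the mapped scatterlist (sgl/sg_len) are assumed
 * to be set up elsewhere:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 *	if (chan) {
 *		desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *					       DMA_DEV_TO_MEM,
 *					       DMA_PREP_INTERRUPT);
 *		if (desc) {
 *			dmaengine_submit(desc);
 *			dma_async_issue_pending(chan);
 *		}
 *	}
 */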

static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_tasklet(struct tasklet_struct *t)
{
	struct timb_dma *td = from_tasklet(td, t, tasklet);
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;

			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(struct_size(td, channels, pdata->nr_channels),
		GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_setup(&td->tasklet, td_tasklet);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX; reject a channel whose
		 * rx flag contradicts its position */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);