/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>

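/*
 * Initial size of the per-DMAC descriptor pool; pl330_get_desc() grows
 * it on demand (one desc at a time, with GFP_ATOMIC) if the pool runs dry.
 */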
#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
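
/*
 * A descriptor's expected lifecycle, pieced together from the states
 * above: FREE (in the DMAC pool) -> PREP (plucked in prep_xxx) ->
 * BUSY (submitted to the PL330 core) -> DONE (xfer finished) ->
 * FREE again, once pl330_tasklet has run the client callback and
 * returned the desc to the pool.
 */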

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All descs in a list belong to the same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
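
/*
 * Note: the client callbacks above run without pch->lock held (the
 * caller hands us a private list); only returning the descriptors to
 * the DMAC-wide pool is done under pool_lock.
 */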

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

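/*
 * Completion tasklet: moves DONE descriptors off the work_list, tries
 * to feed the PL330 core more work, kicks the channel thread, and only
 * then runs the client callbacks via free_desc_list().
 */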
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req immediately following the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
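
/*
 * pl330_alloc_chan_resources() above returns 1 (one hardware channel
 * thread acquired) on success and 0 when no free thread is available,
 * in line with the dmaengine convention that device_alloc_chan_resources
 * reports the number of resources allocated.
 */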

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irqsave(&pch->lock, flags);

	/* FLUSH the PL330 Channel thread */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->work_list, node)
		desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long) pch);

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
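/*
 * Cookies are assigned sequentially per channel; on signed overflow
 * they wrap back to 1, since values <= 0 are reserved by the dmaengine
 * core (hence the "++cookie < 0" checks below).
 */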
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = peri->peri_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
		desc->req.peri = 0;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
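/*
 * A worked example of the computation below, under assumed hardware
 * parameters: with a 64-bit data bus (8 bytes) and a 16-line data
 * buffer, a request using brst_size = 2 (4-byte beats) starts from
 * (8 * 16) >> 2 = 32, is capped to the PL330 maximum of 16, and is
 * then decremented until it evenly divides len.
 */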
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
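
/*
 * Illustrative client usage, not part of this driver: assuming a
 * channel already acquired via dma_request_channel() with DMA_MEMCPY
 * capability, and done_fn being a hypothetical completion callback,
 * a memcpy would typically be driven like:
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	tx->callback = done_fn;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * Completion can later be checked with dma_async_is_tx_complete().
 */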

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i, burst_size;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = peri->fifo_addr;
	burst_size = peri->burst_sz;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = burst_size;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
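
/*
 * Note on the chaining above: the first desc heads the chain, later
 * descs hang off first->node, and the *last* desc is what is returned
 * to the client; pl330_tx_submit() relies on this to walk and assign
 * cookies to the whole list when that descriptor is submitted.
 */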

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
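
/*
 * pl330_update() is the PL330 core driver's interrupt service routine
 * (data here is the pl330_info passed to request_irq); a nonzero
 * return means it found and serviced events for this DMAC.
 */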

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev) < 0) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err1;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		dev_err(&adev->dev, "unable to allocate channels\n");
		ret = -ENOMEM;
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

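/*
 * The AMBA periph ID below encodes part number 0x330 in bits [11:0]
 * and ARM as the designer (0x41) in bits [19:12]; the mask matches on
 * both fields.
 */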
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");