// SPDX-License-Identifier: GPL-2.0
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
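
/*
 * Typical descriptor life cycle, as implemented below (a summary for
 * orientation, not an exhaustive state machine):
 *
 *	DESC_IDLE      -> DESC_PREPARED		shdma_add_desc()
 *	DESC_PREPARED  -> DESC_SUBMITTED	shdma_tx_submit()
 *	DESC_SUBMITTED -> DESC_COMPLETED	chan_irqt()
 *	DESC_COMPLETED -> DESC_WAITING		__ld_cleanup(), callback pending
 *	DESC_WAITING   -> DESC_IDLE		__ld_cleanup(), after the ack
 */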

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement to
 * the "old" method, using the .private pointer.
 * You always have to pass a valid slave id as the argument; old drivers that
 * pass ERR_PTR(-EINVAL) as a filter parameter and set the slave ID up in
 * dma_slave_config need to be updated, so that the slave_id field can be
 * removed from dma_slave_config. If this filter is used, the slave driver,
 * after calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .direction, and either .src_addr or
 * .dst_addr set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using
 * these services, would have to provide their own filters, which first would
 * check the device driver, similar to how other DMAC drivers, e.g.,
 * sa11x0-dma.c, do this, and only then, in case of a match, call this common
 * filter.
 * NOTE 2: This filter function is also used in the DT case by
 * shdma_of_xlate(). In that case the MID-RID value is used for slave channel
 * filtering and is passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver.
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;
		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
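
/*
 * Illustrative client-side usage of the above filter. This is a sketch, not
 * code taken from a real slave driver: the slave_id value and the fifo_phys
 * address are hypothetical, while dma_cap_zero(), dma_cap_set(),
 * dma_request_channel(), dmaengine_slave_config() and dma_release_channel()
 * are the standard dmaengine client API:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(long)slave_id);
 *	if (chan && dmaengine_slave_config(chan, &cfg) < 0) {
 *		dma_release_channel(chan);
 *		chan = NULL;
 *	}
 */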

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with desc_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d\n",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
						struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * So far only .slave_id is used, but the slave drivers are
	 * encouraged to also set a transfer direction and an address.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * overriding the slave_id through dma_slave_config is deprecated,
	 * but possibly some out-of-tree drivers still do it.
	 */
	if (WARN_ON_ONCE(config->slave_id &&
			 config->slave_id != schan->real_slave_id))
		schan->real_slave_id = config->slave_id;

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel, while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
}

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report an error.
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
			   unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);
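
/*
 * A minimal usage sketch (the platform device, the variables and the "shdma"
 * name are hypothetical; platform_get_irq() is the usual way to obtain the
 * interrupt number):
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 *	ret = shdma_request_irq(schan, irq, IRQF_SHARED, "shdma");
 */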

void shdma_chan_probe(struct shdma_dev *sdev,
		       struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management list */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_config = shdma_config;
	dma_dev->device_terminate_all = shdma_terminate_all;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
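
/*
 * Sketch of how a hardware glue driver might wire itself up to this library.
 * The my_shdma_ops structure, struct my_desc and my_chans[] are hypothetical
 * stand-ins for driver-specific code; shdma_init(), shdma_chan_probe() and
 * dma_async_device_register() are the real entry points:
 *
 *	static const struct shdma_ops my_shdma_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		.setup_xfer	= my_setup_xfer,
 *		.set_slave	= my_set_slave,
 *		.desc_setup	= my_desc_setup,
 *		.slave_addr	= my_slave_addr,
 *		.channel_busy	= my_channel_busy,
 *		.halt_channel	= my_halt_channel,
 *		.desc_completed	= my_desc_completed,
 *	};
 *
 *	sdev->ops = &my_shdma_ops;
 *	sdev->desc_size = sizeof(struct my_desc);
 *	ret = shdma_init(&pdev->dev, sdev, nr_channels);
 *	if (ret < 0)
 *		return ret;
 *	for (i = 0; i < nr_channels; i++)
 *		shdma_chan_probe(sdev, &my_chans[i].shdma_chan, i);
 *
 *	dma_async_device_register(&sdev->dma_dev);
 */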

void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
				   sizeof(long),
				   GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");