// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

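/*
 * Translate a finished descriptor into a dmaengine result and invoke the
 * client callback.  The hardware-written completion record decides the
 * outcome: DSA_COMP_SUCCESS maps to DMA_TRANS_NOERROR, any other non-zero
 * status maps to DMA_TRANS_WRITE_FAILED, and a software abort with no
 * hardware status yet maps to DMA_TRANS_ABORTED.  Descriptors that were
 * never submitted (no cookie assigned) are left alone.
 */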
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type)
{
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS)
		res.result = DMA_TRANS_NOERROR;
	else if (desc->completion->status)
		res.result = DMA_TRANS_WRITE_FAILED;
	else if (comp_type == IDXD_COMPLETE_ABORT)
		res.result = DMA_TRANS_ABORTED;
	else
		complete = 0;

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}
}

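/*
 * Every kernel descriptor asks the device to write a completion record:
 * CRAV marks the completion record address as valid and RCR requests the
 * record itself.  A completion interrupt (RCI) is requested only when the
 * client passed DMA_PREP_INTERRUPT.
 */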
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
					  u64 *compl_addr)
{
	*compl_addr = desc->compl_dma;
}

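/*
 * Fill the fields common to all DSA hardware descriptors.  addr_f1 and
 * addr_f2 carry the opcode-specific source and destination addresses, and
 * compl is the DMA address of the completion record the device writes back.
 */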
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 1 for kernel descriptors.
	 */
	hw->priv = 1;
	hw->completion_addr = compl;
}

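/*
 * ->device_prep_dma_memcpy() entry point: take a descriptor from the wq and
 * prepare a DSA memory move.  Returns NULL if the wq is not enabled, the
 * length exceeds the device's maximum transfer size, or no descriptor is
 * available.  Nothing is submitted to hardware until the client calls
 * tx_submit().
 */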
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}

static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
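
/*
 * Illustrative client-side usage (a minimal sketch, not part of this driver).
 * Because the device advertises DMA_COMPLETION_NO_ORDER and tx_status()
 * always returns DMA_OUT_OF_ORDER, a client has to pass DMA_PREP_INTERRUPT
 * and rely on the result callback instead of polling a cookie.  The filter
 * function and callback below are hypothetical names:
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_idxd_filter, NULL);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback_result = my_copy_done;
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * dmaengine_submit() lands in idxd_dma_tx_submit() below, which assigns the
 * cookie and pushes the descriptor to the wq, so issue_pending() has nothing
 * left to do.  my_copy_done() is invoked from idxd_dma_complete_txd() once
 * the completion record is written.
 */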

static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0)
		return rc;

	return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

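/*
 * Register one struct dma_device per idxd device.  Channels are private
 * (DMA_PRIVATE) and completions may be reported out of submission order
 * (DMA_COMPLETION_NO_ORDER).  DMA_MEMCPY is advertised only when the
 * operation capability register reports memory move support.
 */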
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

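/*
 * Expose a work queue as a DMA channel.  Each descriptor pre-allocated for
 * the wq has its dmaengine descriptor initialized against this channel, and
 * a reference on the wq conf device is held for as long as the channel is
 * registered.
 */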
int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}

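/*
 * Probe path for the "dmaengine" sub-driver binding to a wq: mark the wq as
 * kernel-owned, enable it, allocate descriptor resources, set up the
 * wq_active percpu reference, and register the DMA channel.  The error
 * labels unwind these steps in reverse.
 */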
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_KERNEL;
	rc = __drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
		dev_dbg(dev, "WQ resource alloc failed\n");
		goto err_res_alloc;
	}

	rc = idxd_wq_init_percpu_ref(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
		dev_dbg(dev, "percpu_ref setup failed\n");
		goto err_ref;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	idxd_wq_quiesce(wq);
	percpu_ref_exit(&wq->wq_active);
err_ref:
	idxd_wq_free_resources(wq);
err_res_alloc:
	__drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

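/*
 * Remove path: quiesce the wq so in-flight descriptors drain, unregister the
 * DMA channel, free descriptor resources, and disable the wq before tearing
 * down the percpu reference.
 */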
static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	idxd_wq_free_resources(wq);
	__drv_disable_wq(wq);
	percpu_ref_exit(&wq->wq_active);
	mutex_unlock(&wq->wq_lock);
}

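/*
 * The dev_types table restricts this driver to wq devices on the idxd bus;
 * "dmaengine" is the sub-driver name it is registered under.
 */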
static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);