// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

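/*
 * Every dma_chan exposed by this driver is embedded in an idxd_dma_chan,
 * which carries a pointer to the backing idxd work queue.
 */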
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

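/*
 * Map the DSA completion record status onto a dmaengine result and, if the
 * descriptor carries a cookie, complete it and invoke the client callback.
 */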
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type)
{
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS)
		res.result = DMA_TRANS_NOERROR;
	else if (desc->completion->status)
		res.result = DMA_TRANS_WRITE_FAILED;
	else if (comp_type == IDXD_COMPLETE_ABORT)
		res.result = DMA_TRANS_ABORTED;
	else
		complete = 0;

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}
}

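/*
 * Convert dmaengine preparation flags into DSA descriptor flags: always
 * request a completion record, and request a completion interrupt when
 * DMA_PREP_INTERRUPT is set.
 */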
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
					  u64 *compl_addr)
{
	*compl_addr = desc->compl_dma;
}

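/*
 * Fill in the fields of a DSA hardware descriptor that are common to all
 * operations: opcode, source/destination addresses, transfer size, flags
 * and the completion record address.
 */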
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					  struct dsa_hw_desc *hw, char opcode,
					  u64 addr_f1, u64 addr_f2, u64 len,
					  u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 1 for kernel descriptors.
	 */
	hw->priv = 1;
	hw->completion_addr = compl;
}

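/*
 * Prepare a DSA memory-move descriptor for the dmaengine memcpy operation.
 * Returns NULL if the work queue is not enabled, the length exceeds the
 * device transfer limit, or no descriptor could be allocated.
 */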
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}

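/*
 * Channel resources map directly to work queue references: allocating a
 * channel takes a reference on the backing wq, freeing it drops one.
 */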
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

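/*
 * Descriptors complete out of order, so cookie based progress tracking is
 * not supported; clients must rely on the per-descriptor callback instead.
 */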
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

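/*
 * Assign a cookie and hand the descriptor straight to the work queue; there
 * is no software pending queue, which is why issue_pending() is a no-op.
 */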
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0)
		return rc;

	return cookie;
}

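/*
 * Called by the dmaengine core once the last reference to the dma_device
 * is dropped; free the containing idxd_dma_dev.
 */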
static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

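/*
 * Allocate the idxd_dma_dev container, advertise the supported capabilities
 * (private channels, out-of-order completion and, if the hardware supports
 * it, memory move) and register the dma_device with the dmaengine core.
 */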
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

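/*
 * Expose a work queue as a DMA channel: allocate the idxd_dma_chan wrapper,
 * point every descriptor's tx_submit at idxd_dma_tx_submit and register the
 * channel with the dmaengine core. A reference on the wq conf device keeps
 * the work queue alive while the channel is registered.
 */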
int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

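/*
 * Tear down the DMA channel backing a work queue and drop the reference
 * taken on the wq conf device at registration time.
 */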
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}

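/*
 * Bind a kernel-owned work queue to the dmaengine sub-driver: enable the wq,
 * allocate its descriptor resources, set up the percpu reference used to
 * track in-flight work and register the DMA channel. Each step is unwound
 * in reverse order on failure.
 */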
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_KERNEL;
	rc = __drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
		dev_dbg(dev, "WQ resource alloc failed\n");
		goto err_res_alloc;
	}

	rc = idxd_wq_init_percpu_ref(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
		dev_dbg(dev, "percpu_ref setup failed\n");
		goto err_ref;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	idxd_wq_quiesce(wq);
	percpu_ref_exit(&wq->wq_active);
err_ref:
	idxd_wq_free_resources(wq);
err_res_alloc:
	__drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

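/*
 * Quiesce outstanding work, unregister the DMA channel and release the work
 * queue resources when the sub-driver is unbound.
 */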
static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	idxd_wq_free_resources(wq);
	__drv_disable_wq(wq);
	percpu_ref_exit(&wq->wq_active);
	mutex_unlock(&wq->wq_lock);
}

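/* The dmaengine sub-driver binds to idxd work queue devices only. */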
static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);