// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

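/*
 * Initialize the pre-allocated descriptor at slot @idx for submission:
 * clear the hardware descriptor and completion record, record the CPU
 * hint from the sbitmap allocation, and set up the PASID and interrupt
 * handle as needed.
 */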
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
        struct idxd_desc *desc;
        struct idxd_device *idxd = wq->idxd;

        desc = wq->descs[idx];
        memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
        memset(desc->completion, 0, idxd->data->compl_size);
        desc->cpu = cpu;

        if (device_pasid_enabled(idxd))
                desc->hw->pasid = idxd->pasid;

        /*
         * On the host, MSIX vector 0 is used for misc interrupts. Therefore,
         * when we match vectors 1:1 to the WQ id, we need to add 1.
         */
        if (!idxd->int_handles)
                desc->hw->int_handle = wq->id + 1;
        else
                desc->hw->int_handle = idxd->int_handles[wq->id];

        return desc;
}

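/*
 * Allocate a descriptor from the work queue's sbitmap. For IDXD_OP_BLOCK
 * callers this may sleep (interruptibly) until a slot frees up; for
 * IDXD_OP_NONBLOCK it returns ERR_PTR(-EAGAIN) instead of waiting.
 */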
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
        int cpu, idx;
        struct idxd_device *idxd = wq->idxd;
        DEFINE_SBQ_WAIT(wait);
        struct sbq_wait_state *ws;
        struct sbitmap_queue *sbq;

        if (idxd->state != IDXD_DEV_ENABLED)
                return ERR_PTR(-EIO);

        sbq = &wq->sbq;
        idx = sbitmap_queue_get(sbq, &cpu);
        if (idx < 0) {
                if (optype == IDXD_OP_NONBLOCK)
                        return ERR_PTR(-EAGAIN);
        } else {
                return __get_desc(wq, idx, cpu);
        }

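        /*
         * Slow path: no free slot and the caller can block. Register on the
         * sbitmap wait queue and retry until a slot is freed or a signal is
         * pending.
         */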
        ws = &sbq->ws[0];
        for (;;) {
                sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
                if (signal_pending_state(TASK_INTERRUPTIBLE, current))
                        break;
                idx = sbitmap_queue_get(sbq, &cpu);
                if (idx >= 0)
                        break;
                schedule();
        }

        sbitmap_finish_wait(sbq, ws, &wait);
        if (idx < 0)
                return ERR_PTR(-EAGAIN);

        return __get_desc(wq, idx, cpu);
}

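/*
 * Return a descriptor's slot to the work queue's sbitmap, waking any
 * allocator blocked in idxd_alloc_desc().
 */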
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        int cpu = desc->cpu;

        desc->cpu = -1;
        sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}

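/*
 * Search the irq_entry's work list for the descriptor being aborted and, if
 * found, unlink and return it. Called with ie->list_lock held.
 */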
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                                         struct idxd_desc *desc)
{
        struct idxd_desc *d, *n;

        lockdep_assert_held(&ie->list_lock);
        list_for_each_entry_safe(d, n, &ie->work_list, list) {
                if (d == desc) {
                        list_del(&d->list);
                        return d;
                }
        }

        /*
         * At this point, the desc to be aborted is held by the completion
         * handler, which has taken it off the pending list but has not yet
         * added it to the work list. It will be cleaned up by the interrupt
         * handler when it sees IDXD_COMP_DESC_ABORT as the completion status.
         */
        return NULL;
}

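/*
 * Abort a submitted descriptor that the device refused. The descriptor may be
 * sitting on the irq_entry's pending llist, on its work list, or already in
 * the hands of the completion handler; mark it with IDXD_COMP_DESC_ABORT and
 * complete it here if we can pull it off either list ourselves.
 */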
static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                             struct idxd_desc *desc)
{
        struct idxd_desc *d, *t, *found = NULL;
        struct llist_node *head;
        LIST_HEAD(flist);

        desc->completion->status = IDXD_COMP_DESC_ABORT;
        /*
         * Grab the list lock so it will block the irq thread handler. This
         * allows the abort code to locate the descriptor that needs to be
         * aborted.
         */
        spin_lock(&ie->list_lock);
        head = llist_del_all(&ie->pending_llist);
        if (head) {
                llist_for_each_entry_safe(d, t, head, llnode) {
                        if (d == desc) {
                                found = desc;
                                continue;
                        }

                        if (d->completion->status)
                                list_add_tail(&d->list, &flist);
                        else
                                list_add_tail(&d->list, &ie->work_list);
                }
        }

        if (!found)
                found = list_abort_desc(wq, ie, desc);
        spin_unlock(&ie->list_lock);

        if (found)
                complete_desc(found, IDXD_COMPLETE_ABORT);

        /*
         * complete_desc() returns the desc to the allocator, where it can be
         * acquired by a different process and have its desc->list modified.
         * Delete the desc from the local list first so the list traversal
         * does not get corrupted by the other process.
         */
        list_for_each_entry_safe(d, t, &flist, list) {
                list_del_init(&d->list);
                complete_desc(d, IDXD_COMPLETE_NORMAL);
        }
}

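/*
 * Submit a previously allocated descriptor to the device through the work
 * queue's portal. Returns 0 on success or a negative errno; on failure the
 * descriptor is freed (or aborted) on the caller's behalf.
 */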
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_irq_entry *ie = NULL;
        void __iomem *portal;
        int rc;

        if (idxd->state != IDXD_DEV_ENABLED) {
                idxd_free_desc(wq, desc);
                return -EIO;
        }

        if (!percpu_ref_tryget_live(&wq->wq_active)) {
                idxd_free_desc(wq, desc);
                return -ENXIO;
        }

        portal = idxd_wq_portal_addr(wq);

        /*
         * The wmb() flushes writes to coherent DMA data before
         * possibly triggering a DMA read. The wmb() is necessary
         * even on UP because the recipient is a device.
         */
        wmb();

        /*
         * Queue the descriptor on the lockless pending list of the irq_entry
         * that the descriptor was designated to.
         */
        if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
                ie = &idxd->irq_entries[wq->id + 1];
                llist_add(&desc->llnode, &ie->pending_llist);
        }

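        /*
         * A dedicated WQ takes a posted 64-byte write that is never rejected;
         * a shared WQ must use enqcmds(), which fails if the device does not
         * accept the descriptor.
         */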
        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
                /*
                 * It's not likely that we would receive a queue full rejection
                 * since the descriptor allocation gates at the wq size. If we
                 * receive a -EAGAIN, that means something went wrong, such as
                 * the device not accepting descriptors at all.
                 */
                rc = enqcmds(portal, desc->hw);
                if (rc < 0) {
                        percpu_ref_put(&wq->wq_active);
                        /* abort operation frees the descriptor */
                        if (ie)
                                llist_abort_desc(wq, ie, desc);
                        else
                                idxd_free_desc(wq, desc);
                        return rc;
                }
        }

        percpu_ref_put(&wq->wq_active);
        return 0;
}