// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"
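
/*
 * Mark every workqueue on the device as disabled. The caller must hold
 * idxd->dev_lock.
 */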
void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->state = IDXD_WQ_DISABLED;
	}
}
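
/*
 * Work item that recovers the device after a software reset: reset the
 * device, reapply the configuration, re-enable the device, then re-enable
 * any workqueues that were enabled before the halt. On failure, fall back
 * to marking all workqueues disabled.
 */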
static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
			}
		}
	}

	return;

 out:
	idxd_device_wqs_clear_state(idxd);
}
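
/*
 * Hard interrupt handler: mask the MSI-X vector and defer the actual work
 * to the threaded handler.
 */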
irqreturn_t idxd_irq_handler(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;

	idxd_mask_msix_vector(idxd, irq_entry->id);
	return IRQ_WAKE_THREAD;
}
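
/*
 * Threaded handler for the misc (non-I/O) interrupt vector. It reports
 * software errors, completes device commands, handles the device halt
 * state, and acknowledges the interrupt cause bits before unmasking the
 * vector.
 */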
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 cause, val = 0;
	int i;
	bool err = false;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));
		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->idxd_cdev.err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->idxd_cdev.err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 * yet.
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (!err)
		goto out;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we will throw the work
			 * on a system workqueue in order to allow interrupts
			 * for the device command completions.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_device_wqs_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
		}
	}

 out:
	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}
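
/*
 * Drain the lockless pending_llist. Completed descriptors are freed after
 * their callbacks run; descriptors still in flight are moved onto the
 * work_list. *processed is set to the number completed, and the return
 * value is the number still waiting on hardware.
 */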
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return 0;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			list_add_tail(&desc->list, &irq_entry->work_list);
			queued++;
		}
	}

	return queued;
}
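
/*
 * Walk the work_list and complete any descriptors whose completion status
 * has been written by the hardware. *processed is set to the number
 * completed, and the return value is the number still waiting.
 */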
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed)
{
	struct list_head *node, *next;
	int queued = 0;

	*processed = 0;
	if (list_empty(&irq_entry->work_list))
		return 0;

	list_for_each_safe(node, next, &irq_entry->work_list) {
		struct idxd_desc *desc =
			container_of(node, struct idxd_desc, list);

		if (desc->completion->status) {
			list_del(&desc->list);
			/* process and callback */
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			queued++;
		}
	}

	return queued;
}
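
/*
 * Process completions for one interrupt vector, cycling between the
 * work_list and the pending_llist until a pass finds no descriptors still
 * waiting on hardware. Returns the total number of descriptors completed.
 */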
static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds each descriptor after sending it to the
	 * workqueue. It's a lockless singly linked list. The work_list is
	 * a standard Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the MSI-X vector when using threaded
	 * irq. To work with the restrictions of llist to remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, &processed);
		total += processed;
	} while (rc != 0);

	return total;
}
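
/*
 * Threaded handler for a workqueue completion vector: process completed
 * descriptors, re-enable the MSI-X vector masked in the hard handler, and
 * report IRQ_NONE if nothing was processed.
 */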
irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}