// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },

	/* IAX ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
	"iax"
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

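/*
 * Set up MSI-X: vector 0 services device error/misc events, the remaining
 * vectors service work queue completion interrupts.
 */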
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
					 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_irq_entries;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	irq_entry = &idxd->irq_entries[0];
	rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
				  0, "idxd-misc", irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
			goto err_wq_irqs;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
	}

	idxd_unmask_error_interrupts(idxd);
	idxd_msix_perm_setup(idxd);
	return 0;

 err_wq_irqs:
	while (--i >= 0) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
	}
 err_misc_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
 err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

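/*
 * Allocate the per-device work queue array. Each wq gets an initialized
 * (not yet added) conf_dev on the dsa bus and a wqcfg shadow buffer; on
 * failure the references taken so far are dropped with put_device().
 */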
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		wq->id = i;
		wq->idxd = idxd;
		device_initialize(&wq->conf_dev);
		wq->conf_dev.parent = &idxd->conf_dev;
		wq->conf_dev.bus = &dsa_bus_type;
		wq->conf_dev.type = &idxd_wq_device_type;
		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(&wq->conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->wqs[i]->conf_dev);
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		engine->id = i;
		engine->idxd = idxd;
		device_initialize(&engine->conf_dev);
		engine->conf_dev.parent = &idxd->conf_dev;
		engine->conf_dev.type = &idxd_engine_device_type;
		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->engines[i]->conf_dev);
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		group->id = i;
		group->idxd = idxd;
		device_initialize(&group->conf_dev);
		group->conf_dev.parent = &idxd->conf_dev;
		group->conf_dev.bus = &dsa_bus_type;
		group->conf_dev.type = &idxd_group_device_type;
		rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		group->tc_a = -1;
		group->tc_b = -1;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->groups[i]->conf_dev);
	return rc;
}

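/*
 * Build the software state derived from the capability registers: wqs,
 * engines, groups, and the driver's own workqueue for deferred work.
 */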
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

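/*
 * Cache the GENCAP/GRPCAP/ENGCAP/WQCAP/OPCAP registers as software limits
 * (max transfer and batch size, groups, tokens, engines, work queues).
 */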
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
		idxd->type = IDXD_TYPE_IAX;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

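/*
 * Allocate the idxd device context, take an id from idxd_ida, and initialize
 * its conf_dev ("dsa%d" or "iax%d") on the dsa bus.
 */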
static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd_set_type(idxd);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	device_initialize(&idxd->conf_dev);
	idxd->conf_dev.parent = dev;
	idxd->conf_dev.bus = &dsa_bus_type;
	idxd->conf_dev.type = idxd_get_device_type(idxd);
	rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

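/*
 * Obtain a supervisor-mode PASID via iommu_sva_bind_device() so the kernel
 * can submit descriptors using shared virtual addressing (SVA).
 */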
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

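/*
 * Device-level probe: reset the device, optionally enable the system PASID,
 * read capabilities and table offsets, then set up internal structures and
 * interrupts.
 */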
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = idxd_enable_system_pasid(idxd);
		if (rc < 0)
			dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
		else
			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	return rc;
}

static void idxd_type_init(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		idxd->compl_size = sizeof(struct dsa_completion_record);
	else if (idxd->type == IDXD_TYPE_IAX)
		idxd->compl_size = sizeof(struct iax_completion_record);
}

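/*
 * PCI probe: enable the device, map the MMIO BAR, set the DMA masks, run the
 * device-level probe, and register the configuration devices with sysfs.
 */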
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	idxd_type_init(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(&idxd->conf_dev);
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

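/*
 * On shutdown, descriptors still sitting on an irq entry's pending llist or
 * work list are completed with an abort status and freed.
 */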
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

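/*
 * Quiesce the device: disable it, mask and free the MSI-X vectors, flush any
 * outstanding descriptors, and release the MMIO mapping.
 */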
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(irq_entry->vector);
		free_irq(irq_entry->vector, irq_entry);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	idxd_msix_perm_clear(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_unregister_devices(idxd);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

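/*
 * Module init: require MOVDIR64B, note ENQCMD(S) availability, then register
 * the bus type, the idxd device driver, the char dev driver, and the PCI
 * driver, unwinding in reverse order on failure.
 */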
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);