// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

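/*
 * Set up the device's MSI-X vectors. Vector 0 is reserved for error and
 * other administrative events and is serviced by idxd_misc_thread();
 * every remaining vector carries work queue completions and is serviced
 * by idxd_wq_thread(), each with its own pending llist and work list.
 * All allocations are devm-managed, so only the MSI-X enable and the
 * error interrupt unmask need explicit unwinding on failure.
 */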
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
			msixcnt, GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

 err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

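/*
 * Allocate the software state mirroring the topology reported by
 * idxd_read_caps(): one idxd_group, idxd_wq, and idxd_engine per
 * hardware instance, plus a kernel workqueue named after the device
 * (its users live elsewhere in the driver). Group traffic classes
 * start out unconfigured (-1), and each wq inherits the device-wide
 * max transfer size as its default.
 */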
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	init_waitqueue_head(&idxd->cmd_waitq);
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		wq->idxd_cdev.minor = -1;
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq)
		return -ENOMEM;

	return 0;
}

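/*
 * The two halves of the table offset register encode where the group
 * config, wq config, MSI-X permission, and perfmon tables live in MMIO
 * space, in units of 0x100 bytes; convert them to byte offsets once
 * and cache them.
 */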
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

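/*
 * Snapshot the general, group, engine, work queue, and operation
 * capability registers and derive the driver's limits from them: max
 * transfer and batch sizes, group/engine/wq counts, and the token
 * budget.
 */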
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

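/*
 * Allocate the per-device context and attach the MMIO mapping set up
 * by idxd_pci_probe(). devm-managed, so it is released automatically
 * when the PCI device goes away.
 */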
static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

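/*
 * Device-level probe: reset the device, read its capabilities and
 * table offsets, build the software state, set up interrupts, and
 * allocate a device id from the per-type IDR. The cdev major is also
 * cached here for the char device interface.
 */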
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	idxd_device_init_reset(idxd);
	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
 err_setup:
	return rc;
}

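/*
 * PCI entry point: enable the device, map the control BAR, set the
 * 64-bit DMA masks (falling back to 32-bit), then hand off to
 * idxd_probe() and register the sysfs interface. The device is left
 * in IDXD_DEV_CONF_READY state; enabling it is expected to happen
 * later through that interface.
 */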
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}

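/*
 * Shutdown helpers: abort descriptors that were submitted but have not
 * completed. Anything still on an interrupt handler's pending llist or
 * work list is completed with IDXD_COMPLETE_ABORT status and returned
 * to its wq's descriptor pool.
 */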
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

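/*
 * Quiesce the device: disable it, mask all interrupt sources, wait out
 * in-flight handlers with synchronize_irq(), then abort whatever is
 * still queued on the per-vector lists. Vector 0 carries no
 * descriptors and is only synchronized, not flushed.
 */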
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	destroy_workqueue(idxd->wq);
}

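/*
 * Driver unbind: tear down sysfs, quiesce the device, and release its
 * id. Memory, MMIO mappings, and irqs are devm-managed and freed by
 * the driver core.
 */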
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

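/*
 * The device requires the MOVDIR64B instruction (an atomic 64-byte
 * store) for work submission, so refuse to load on CPUs without it.
 * Registration order is bus type, device driver, cdev, then the PCI
 * driver, unwound in reverse on failure.
 */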
static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support write512, there's no point in
	 * enumerating the device. We cannot utilize it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

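/* Module exit: unregister the PCI driver, the cdev, and the bus type. */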
static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);