blob: 4d68719e608f769cc1bebfaf1cff1c7dfe512811 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) PCIe device
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Zhang Yi <Yi.Z.Zhang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */
16
#include <linux/aer.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <linux/types.h>

#include "dfl.h"
26
Zhang Yi72ddd9f2018-06-30 08:53:19 +080027#define DRV_VERSION "0.8"
28#define DRV_NAME "dfl-pci"
29
Matthew Gerlachfa41d102021-01-06 20:37:08 -080030#define PCI_VSEC_ID_INTEL_DFLS 0x43
31
32#define PCI_VNDR_DFLS_CNT 0x8
33#define PCI_VNDR_DFLS_RES 0xc
34
35#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
36#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)
37
/* per-device private data, attached to the PCI device via pci_set_drvdata() */
struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};
41
Xu Yilun89eb35e2020-08-19 15:45:19 +080042static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
Wu Hao968b8192018-06-30 08:53:20 +080043{
Xu Yilun89eb35e2020-08-19 15:45:19 +080044 if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
Wu Hao968b8192018-06-30 08:53:20 +080045 return NULL;
46
Xu Yilun89eb35e2020-08-19 15:45:19 +080047 return pcim_iomap_table(pcidev)[0];
Wu Hao968b8192018-06-30 08:53:20 +080048}
49
Xu Yilunbfef9462020-06-16 12:08:43 +080050static int cci_pci_alloc_irq(struct pci_dev *pcidev)
51{
52 int ret, nvec = pci_msix_vec_count(pcidev);
53
54 if (nvec <= 0) {
55 dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
56 return 0;
57 }
58
59 ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
60 if (ret < 0)
61 return ret;
62
63 return nvec;
64}
65
/* release the MSI-X vectors allocated by cci_pci_alloc_irq() */
static void cci_pci_free_irq(struct pci_dev *pcidev)
{
	pci_free_irq_vectors(pcidev);
}
70
Zhang Yi72ddd9f2018-06-30 08:53:19 +080071/* PCI Device ID */
Russ Weighta78a51a2021-04-05 16:52:59 -070072#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
73#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
74#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
75#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
76#define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B
Martin Hundebøll82fb70b2021-06-25 09:42:09 +020077#define PCIE_DEVICE_ID_SILICOM_PAC_N5010 0x1000
78#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
79
Zhang Yi72ddd9f2018-06-30 08:53:19 +080080/* VF Device */
Russ Weighta78a51a2021-04-05 16:52:59 -070081#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
82#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
83#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
84#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C
Zhang Yi72ddd9f2018-06-30 08:53:19 +080085
86static struct pci_device_id cci_pcie_id_tbl[] = {
87 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
88 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
89 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
90 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
91 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
92 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
Xu Yiluneacfbf52020-07-13 09:47:46 +080093 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
Russ Weighta78a51a2021-04-05 16:52:59 -070094 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
95 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
Martin Hundebøll82fb70b2021-06-25 09:42:09 +020096 {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
97 {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
Zhang Yi72ddd9f2018-06-30 08:53:19 +080098 {0,}
99};
100MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
101
Wu Hao968b8192018-06-30 08:53:20 +0800102static int cci_init_drvdata(struct pci_dev *pcidev)
103{
104 struct cci_drvdata *drvdata;
105
106 drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
107 if (!drvdata)
108 return -ENOMEM;
109
110 pci_set_drvdata(pcidev, drvdata);
111
112 return 0;
113}
114
115static void cci_remove_feature_devs(struct pci_dev *pcidev)
116{
117 struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
118
119 /* remove all children feature devices */
120 dfl_fpga_feature_devs_remove(drvdata->cdev);
Xu Yilunbfef9462020-06-16 12:08:43 +0800121 cci_pci_free_irq(pcidev);
122}
123
124static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
125{
126 unsigned int i;
127 int *table;
128
129 table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
130 if (!table)
131 return table;
132
133 for (i = 0; i < nvec; i++)
134 table[i] = pci_irq_vector(pcidev, i);
135
136 return table;
Wu Hao968b8192018-06-30 08:53:20 +0800137}
138
/*
 * Locate Device Feature Lists described by the Intel DFL vendor-specific
 * PCIe extended capability (VSEC) and register each one with @info.
 *
 * Returns 0 on success, -ENODEV when no DFL VSEC is present (the caller
 * may then fall back to find_dfls_by_default()), or -EINVAL when the VSEC
 * contents are malformed.
 */
static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
	u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
	int dfl_res_off, i, bars, voff = 0;
	resource_size_t start, len;

	/* walk all vendor-specific extended capabilities looking for the DFL one */
	while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
		vndr_hdr = 0;
		pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);

		if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
		    pcidev->vendor == PCI_VENDOR_ID_INTEL)
			break;
	}

	if (!voff) {
		dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
		return -ENODEV;
	}

	/* at most one DFL per BAR, so the count may not exceed the BAR count */
	dfl_cnt = 0;
	pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
	if (dfl_cnt > PCI_STD_NUM_BARS) {
		dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
			__func__, dfl_cnt, PCI_STD_NUM_BARS);
		return -EINVAL;
	}

	/* all per-DFL resource dwords must fit inside extended config space */
	dfl_res_off = voff + PCI_VNDR_DFLS_RES;
	if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
		dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
			__func__);
		return -EINVAL;
	}

	for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
		dfl_res = GENMASK(31, 0);
		pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);

		/* low bits select the BAR; the remaining bits are the offset */
		bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
		if (bir >= PCI_STD_NUM_BARS) {
			dev_err(&pcidev->dev, "%s bad bir number %d\n",
				__func__, bir);
			return -EINVAL;
		}

		/* reject duplicate DFL entries for the same BAR */
		if (bars & BIT(bir)) {
			dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
				__func__, bir);
			return -EINVAL;
		}

		bars |= BIT(bir);

		/* the offset must lie inside the BAR */
		len = pci_resource_len(pcidev, bir);
		offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
		if (offset >= len) {
			dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
				__func__, offset, &len);
			return -EINVAL;
		}

		dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);

		/* the DFL spans from the offset to the end of the BAR */
		len -= offset;

		start = pci_resource_start(pcidev, bir) + offset;

		dfl_fpga_enum_info_add_dfl(info, start, len);
	}

	return 0;
}
212
/*
 * Default method of finding DFLs starting at offset 0 of BAR 0.
 *
 * Used when no DFL VSEC is present. Maps BAR 0, identifies whether the
 * device exposes an FME (PF) or a Port (VF) header there, registers each
 * discovered DFL with @info, then unmaps BAR 0 again so the later
 * enumeration step can claim the regions itself.
 *
 * Returns 0 on success, -ENOMEM if BAR 0 cannot be mapped, or -ENODEV if
 * neither an FME nor a Port header is found.
 */
static int find_dfls_by_default(struct pci_dev *pcidev,
				struct dfl_fpga_enum_info *info)
{
	int port_num, bar, i, ret = 0;
	resource_size_t start, len;
	void __iomem *base;
	u32 offset;
	u64 v;

	/* start to find Device Feature List from Bar 0 */
	base = cci_pci_ioremap_bar0(pcidev);
	if (!base)
		return -ENOMEM;

	/*
	 * PF device has FME and Ports/AFUs, and VF device only has one
	 * Port/AFU. Check them and add related "Device Feature List" info
	 * for the next step enumeration.
	 */
	if (dfl_feature_is_fme(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);

		/*
		 * find more Device Feature Lists (e.g. Ports) per information
		 * indicated by FME module.
		 */
		v = readq(base + FME_HDR_CAP);
		port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);

		/* hardware should never report more ports than the DFL maximum */
		WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);

		for (i = 0; i < port_num; i++) {
			v = readq(base + FME_HDR_PORT_OFST(i));

			/* skip ports which are not implemented. */
			if (!(v & FME_PORT_OFST_IMP))
				continue;

			/*
			 * add Port's Device Feature List information for next
			 * step enumeration. The port register encodes which
			 * BAR holds the port DFL and its offset within it.
			 */
			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
			start = pci_resource_start(pcidev, bar) + offset;
			len = pci_resource_len(pcidev, bar) - offset;

			dfl_fpga_enum_info_add_dfl(info, start, len);
		}
	} else if (dfl_feature_is_port(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);
	} else {
		/* neither FME nor Port header at BAR 0: not a usable DFL device */
		ret = -ENODEV;
	}

	/* release I/O mappings for next step enumeration */
	pcim_iounmap_regions(pcidev, BIT(0));

	return ret;
}
280
/*
 * Enumerate feature devices under the PCI device.
 *
 * Collects IRQ and DFL location information into an enumeration info
 * structure, then hands it to the DFL core to create the child feature
 * devices. On any failure the allocated IRQ vectors are released; the
 * enumeration info is always freed. Returns 0 on success or a negative
 * errno.
 */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device support irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		/*
		 * irq_table is freed unconditionally below — presumably
		 * dfl_fpga_enum_info_add_irq() copies it; confirm in dfl core.
		 */
		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);
		if (ret)
			goto irq_free_exit;
	}

	/* prefer the VSEC description; fall back to BAR 0 when it is absent */
	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

irq_free_exit:
	/* on success the vectors stay allocated until cci_remove_feature_devs() */
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);

	return ret;
}
339
Zhang Yi72ddd9f2018-06-30 08:53:19 +0800340static
341int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
342{
343 int ret;
344
345 ret = pcim_enable_device(pcidev);
346 if (ret < 0) {
347 dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
348 return ret;
349 }
350
351 ret = pci_enable_pcie_error_reporting(pcidev);
352 if (ret && ret != -EINVAL)
353 dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
354
355 pci_set_master(pcidev);
356
357 if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
358 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
359 if (ret)
360 goto disable_error_report_exit;
361 } else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
362 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
363 if (ret)
364 goto disable_error_report_exit;
365 } else {
366 ret = -EIO;
367 dev_err(&pcidev->dev, "No suitable DMA support available.\n");
368 goto disable_error_report_exit;
369 }
370
Wu Hao968b8192018-06-30 08:53:20 +0800371 ret = cci_init_drvdata(pcidev);
372 if (ret) {
373 dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
374 goto disable_error_report_exit;
375 }
376
377 ret = cci_enumerate_feature_devs(pcidev);
Xu Yilunbfef9462020-06-16 12:08:43 +0800378 if (!ret)
379 return ret;
Wu Hao968b8192018-06-30 08:53:20 +0800380
Xu Yilunbfef9462020-06-16 12:08:43 +0800381 dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
Zhang Yi72ddd9f2018-06-30 08:53:19 +0800382
383disable_error_report_exit:
384 pci_disable_pcie_error_reporting(pcidev);
385 return ret;
386}
387
Wu Haobdd4f302019-08-04 18:20:12 +0800388static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
389{
390 struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
391 struct dfl_fpga_cdev *cdev = drvdata->cdev;
Wu Haobdd4f302019-08-04 18:20:12 +0800392
393 if (!num_vfs) {
394 /*
395 * disable SRIOV and then put released ports back to default
396 * PF access mode.
397 */
398 pci_disable_sriov(pcidev);
399
400 dfl_fpga_cdev_config_ports_pf(cdev);
401
402 } else {
Xu Yilune19485d2020-07-13 14:10:02 +0800403 int ret;
404
Wu Haobdd4f302019-08-04 18:20:12 +0800405 /*
406 * before enable SRIOV, put released ports into VF access mode
407 * first of all.
408 */
409 ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
410 if (ret)
411 return ret;
412
413 ret = pci_enable_sriov(pcidev, num_vfs);
Xu Yilun3c2760b2020-02-25 14:07:18 +0800414 if (ret) {
Wu Haobdd4f302019-08-04 18:20:12 +0800415 dfl_fpga_cdev_config_ports_pf(cdev);
Xu Yilun3c2760b2020-02-25 14:07:18 +0800416 return ret;
417 }
Wu Haobdd4f302019-08-04 18:20:12 +0800418 }
419
Xu Yilun3c2760b2020-02-25 14:07:18 +0800420 return num_vfs;
Wu Haobdd4f302019-08-04 18:20:12 +0800421}
422
/* remove: undo SR-IOV (PF only), tear down feature devices, disable AER */
static void cci_pci_remove(struct pci_dev *pcidev)
{
	/* a PF may still have VFs enabled; disable SR-IOV before teardown */
	if (dev_is_pf(&pcidev->dev))
		cci_pci_sriov_configure(pcidev, 0);

	cci_remove_feature_devs(pcidev);
	pci_disable_pcie_error_reporting(pcidev);
}
431
/* PCIe driver glue: probe/remove plus the SR-IOV configuration hook */
static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
	.sriov_configure = cci_pci_sriov_configure,
};

module_pci_driver(cci_pci_driver);

MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");