// SPDX-License-Identifier: GPL-2.0
/*
 * File: vpd.c
 * Purpose: Provide PCI VPD support
 *
 * Copyright (C) 2010 Broadcom Corporation.
 */
8
9#include <linux/pci.h>
Bjorn Helgaasf0eb77a2018-03-19 13:06:11 -050010#include <linux/delay.h>
Paul Gortmaker363c75d2011-05-27 09:37:25 -040011#include <linux/export.h>
Bjorn Helgaasf0eb77a2018-03-19 13:06:11 -050012#include <linux/sched/signal.h>
13#include "pci.h"
14
15/* VPD access through PCI 2.2+ VPD capability */
16
/* Backend operations for accessing a device's VPD space */
struct pci_vpd_ops {
	ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
	ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
	int (*set_size)(struct pci_dev *dev, size_t len);
};
22
/* Per-device VPD state, hung off struct pci_dev */
struct pci_vpd {
	const struct pci_vpd_ops *ops;	/* direct access or function-0 redirect */
	struct bin_attribute *attr;	/* Descriptor for sysfs VPD entry */
	struct mutex lock;		/* serializes VPD register transactions */
	unsigned int len;		/* usable VPD size in bytes; 0 = access disabled */
	u16 flag;			/* PCI_VPD_ADDR_F value pci_vpd_wait() expects on completion */
	u8 cap;				/* config-space offset of the VPD capability */
	unsigned int busy:1;		/* a VPD transfer is in flight */
	unsigned int valid:1;		/* len has been computed or overridden */
};
33
Bjorn Helgaasf0eb77a2018-03-19 13:06:11 -050034/**
35 * pci_read_vpd - Read one entry from Vital Product Data
36 * @dev: pci device struct
37 * @pos: offset in vpd space
38 * @count: number of bytes to read
39 * @buf: pointer to where to store result
40 */
41ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
42{
43 if (!dev->vpd || !dev->vpd->ops)
44 return -ENODEV;
45 return dev->vpd->ops->read(dev, pos, count, buf);
46}
47EXPORT_SYMBOL(pci_read_vpd);
48
49/**
50 * pci_write_vpd - Write entry to Vital Product Data
51 * @dev: pci device struct
52 * @pos: offset in vpd space
53 * @count: number of bytes to write
54 * @buf: buffer containing write data
55 */
56ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
57{
58 if (!dev->vpd || !dev->vpd->ops)
59 return -ENODEV;
60 return dev->vpd->ops->write(dev, pos, count, buf);
61}
62EXPORT_SYMBOL(pci_write_vpd);
63
64/**
65 * pci_set_vpd_size - Set size of Vital Product Data space
66 * @dev: pci device struct
67 * @len: size of vpd space
68 */
69int pci_set_vpd_size(struct pci_dev *dev, size_t len)
70{
71 if (!dev->vpd || !dev->vpd->ops)
72 return -ENODEV;
73 return dev->vpd->ops->set_size(dev, len);
74}
75EXPORT_SYMBOL(pci_set_vpd_size);
76
/* The 15-bit VPD address register bounds VPD space at 32768 bytes */
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev: pci device struct
 * @old_size: current assumed size, also maximum allowed size
 *
 * Walks the VPD resource list (ID string, read-only, read-write items)
 * until the End tag.  Returns the offset just past the last tag consumed,
 * or 0 if the structure is malformed or no End tag is found, in which
 * case VPD access is disabled.
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				/* Skip tag header plus its 16-bit payload length */
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag: length lives in the tag byte */
			off += PCI_VPD_SRDT_TAG_SIZE +
			       pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		/* Any tag other than the three known item types (or End,
		 * handled above) means a non-conformant VPD image */
		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	/* No End tag within old_size: treat VPD as unusable */
	return 0;
}
130
/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
	struct pci_vpd *vpd = dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(125);	/* overall budget */
	unsigned long max_sleep = 16;	/* poll backoff ceiling, doubled up to 1024us */
	u16 status;
	int ret;

	/* No transfer outstanding: nothing to wait for */
	if (!vpd->busy)
		return 0;

	while (time_before(jiffies, timeout)) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		/* The device toggles PCI_VPD_ADDR_F on completion: set for a
		 * finished read, cleared for a finished write.  vpd->flag
		 * holds the value the caller expects. */
		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		/* Give up early only on fatal signals; user may be killing us */
		if (fatal_signal_pending(current))
			return -EINTR;

		/* Exponential backoff between polls, 10us up to ~1ms */
		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;
	}

	pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}
172
/*
 * Read VPD through the PCI 2.2+ VPD capability: latch a dword-aligned
 * address, wait for the hardware to fetch the dword, then copy out the
 * requested bytes.  Returns bytes read (possibly clamped to the end of
 * VPD space) or a negative errno.
 */
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	/* Lazily determine the real VPD size on first access */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	/* len == 0 means VPD was found malformed or quirk-disabled */
	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	/* Clamp the request to the end of VPD space */
	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Drain any transfer left over from a previous caller */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Writing the dword-aligned address (flag bit clear) starts a
		 * read; the device sets PCI_VPD_ADDR_F when data is ready */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy bytes out of the dword, skipping leading bytes when
		 * the start offset is not 4-byte aligned */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
239
/*
 * Write VPD through the PCI 2.2+ VPD capability.  The hardware transfers
 * whole dwords only, so both offset and length must be 4-byte aligned.
 * Returns bytes written or a negative errno.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	/* Dword granularity is a hardware requirement, not a policy choice */
	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* Lazily determine the real VPD size on first access */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	/* len == 0 means VPD was found malformed or quirk-disabled */
	if (vpd->len == 0)
		return -EIO;

	/* Unlike reads, writes past the end are rejected rather than clamped */
	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Drain any transfer left over from a previous caller */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble the data dword little-endian */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		/* Writing the address with PCI_VPD_ADDR_F set starts the
		 * store; the device clears the flag on completion */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
297
298static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
299{
300 struct pci_vpd *vpd = dev->vpd;
301
302 if (len == 0 || len > PCI_VPD_MAX_SIZE)
303 return -EIO;
304
305 vpd->valid = 1;
306 vpd->len = len;
307
308 return 0;
309}
310
/* Default backend: access the device's own VPD registers directly */
static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
	.set_size = pci_vpd_set_size,
};
316
317static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
318 void *arg)
319{
320 struct pci_dev *tdev = pci_get_slot(dev->bus,
321 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
322 ssize_t ret;
323
324 if (!tdev)
325 return -ENODEV;
326
327 ret = pci_read_vpd(tdev, pos, count, arg);
328 pci_dev_put(tdev);
329 return ret;
330}
331
332static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
333 const void *arg)
334{
335 struct pci_dev *tdev = pci_get_slot(dev->bus,
336 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
337 ssize_t ret;
338
339 if (!tdev)
340 return -ENODEV;
341
342 ret = pci_write_vpd(tdev, pos, count, arg);
343 pci_dev_put(tdev);
344 return ret;
345}
346
347static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
348{
349 struct pci_dev *tdev = pci_get_slot(dev->bus,
350 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
351 int ret;
352
353 if (!tdev)
354 return -ENODEV;
355
356 ret = pci_set_vpd_size(tdev, len);
357 pci_dev_put(tdev);
358 return ret;
359}
360
/* Redirect backend used when PCI_DEV_FLAGS_VPD_REF_F0 is set on the device */
static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.set_size = pci_vpd_f0_set_size,
};
366
367int pci_vpd_init(struct pci_dev *dev)
368{
369 struct pci_vpd *vpd;
370 u8 cap;
371
372 cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
373 if (!cap)
374 return -ENODEV;
375
376 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
377 if (!vpd)
378 return -ENOMEM;
379
380 vpd->len = PCI_VPD_MAX_SIZE;
381 if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
382 vpd->ops = &pci_vpd_f0_ops;
383 else
384 vpd->ops = &pci_vpd_ops;
385 mutex_init(&vpd->lock);
386 vpd->cap = cap;
387 vpd->busy = 0;
388 vpd->valid = 0;
389 dev->vpd = vpd;
390 return 0;
391}
392
393void pci_vpd_release(struct pci_dev *dev)
394{
395 kfree(dev->vpd);
396}
Matt Carlsonb55ac1b2010-02-26 14:04:41 +0000397
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500398static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
399 struct bin_attribute *bin_attr, char *buf,
400 loff_t off, size_t count)
401{
402 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
403
404 if (bin_attr->size > 0) {
405 if (off > bin_attr->size)
406 count = 0;
407 else if (count > bin_attr->size - off)
408 count = bin_attr->size - off;
409 }
410
411 return pci_read_vpd(dev, off, count, buf);
412}
413
414static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
415 struct bin_attribute *bin_attr, char *buf,
416 loff_t off, size_t count)
417{
418 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
419
420 if (bin_attr->size > 0) {
421 if (off > bin_attr->size)
422 count = 0;
423 else if (count > bin_attr->size - off)
424 count = bin_attr->size - off;
425 }
426
427 return pci_write_vpd(dev, off, count, buf);
428}
429
/* Create the sysfs "vpd" binary attribute for a device that has VPD */
void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev)
{
	int retval;
	struct bin_attribute *attr;

	if (!dev->vpd)
		return;

	attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
	if (!attr)
		return;		/* best-effort: device works without the sysfs file */

	sysfs_bin_attr_init(attr);
	/* size 0: sysfs imposes no bound; the VPD read/write paths clamp
	 * against the detected VPD length themselves */
	attr->size = 0;
	attr->attr.name = "vpd";
	attr->attr.mode = S_IRUSR | S_IWUSR;
	attr->read = read_vpd_attr;
	attr->write = write_vpd_attr;
	retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
	if (retval) {
		kfree(attr);
		return;
	}

	/* Remembered so pcie_vpd_remove_sysfs_dev_files() can tear it down */
	dev->vpd->attr = attr;
}
456
457void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev)
458{
459 if (dev->vpd && dev->vpd->attr) {
460 sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
461 kfree(dev->vpd->attr);
462 }
463}
464
/**
 * pci_vpd_find_tag - locate a resource data type tag in a VPD buffer
 * @buf: VPD data buffer
 * @off: offset at which to start searching
 * @len: length of the buffer
 * @rdt: tag to find (large tags carry PCI_VPD_LRDT; short tags are the
 *       tag bits with the length field masked off)
 *
 * Returns the buffer offset of the tag, or -ENOENT if not found.
 */
int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
{
	int i;

	for (i = off; i < len; ) {
		u8 val = buf[i];

		if (val & PCI_VPD_LRDT) {
			/* Don't return success if the tag isn't complete */
			if (i + PCI_VPD_LRDT_TAG_SIZE > len)
				break;

			if (val == rdt)
				return i;

			/* Skip the 3-byte header plus the item's payload */
			i += PCI_VPD_LRDT_TAG_SIZE +
				pci_vpd_lrdt_size(&buf[i]);
		} else {
			u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;

			if (tag == rdt)
				return i;

			/* Nothing valid follows the End tag */
			if (tag == PCI_VPD_SRDT_END)
				break;

			i += PCI_VPD_SRDT_TAG_SIZE +
			     pci_vpd_srdt_size(&buf[i]);
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
Matt Carlson4067a852010-02-26 14:04:43 +0000499
500int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
501 unsigned int len, const char *kw)
502{
503 int i;
504
505 for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
506 if (buf[i + 0] == kw[0] &&
507 buf[i + 1] == kw[1])
508 return i;
509
510 i += PCI_VPD_INFO_FLD_HDR_SIZE +
511 pci_vpd_info_field_size(&buf[i]);
512 }
513
514 return -ENOENT;
515}
516EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
Bjorn Helgaas99605852018-03-19 13:06:24 -0500517
#ifdef CONFIG_PCI_QUIRKS
/*
 * Quirk non-zero PCI functions to route VPD access through function 0 for
 * devices that share VPD resources between functions. The functions are
 * expected to be identical devices.
 */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
	struct pci_dev *f0;

	/* Function 0 accesses its own VPD directly */
	if (!PCI_FUNC(dev->devfn))
		return;

	f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	if (!f0)
		return;

	/* Redirect only when function 0 is demonstrably the same device */
	if (f0->vpd && dev->class == f0->class &&
	    dev->vendor == f0->vendor && dev->device == f0->device)
		dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;

	pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
543
/*
 * If a device follows the VPD format spec, the PCI core will not read or
 * write past the VPD End Tag. But some vendors do not follow the VPD
 * format spec, so we can't tell how much data is safe to access. Devices
 * may behave unpredictably if we access too much. Blacklist these devices
 * so we don't touch VPD at all.
 */
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
	if (dev->vpd) {
		/* len == 0 makes the read/write paths return -EIO */
		dev->vpd->len = 0;
		pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
		quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
572
573/*
574 * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
575 * VPD end tag will hang the device. This problem was initially
576 * observed when a vpd entry was created in sysfs
577 * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
578 * will dump 32k of data. Reading a full 32k will cause an access
579 * beyond the VPD end tag causing the device to hang. Once the device
580 * is hung, the bnx2 driver will not be able to reset the device.
581 * We believe that it is legal to read beyond the end tag and
582 * therefore the solution is to limit the read/write length.
583 */
584static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
585{
586 /*
587 * Only disable the VPD capability for 5706, 5706S, 5708,
588 * 5708S and 5709 rev. A
589 */
590 if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
591 (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
592 (dev->device == PCI_DEVICE_ID_NX2_5708) ||
593 (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
594 ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
595 (dev->revision & 0xf0) == 0x0)) {
596 if (dev->vpd)
597 dev->vpd->len = 0x80;
598 }
599}
600DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
601 PCI_DEVICE_ID_NX2_5706,
602 quirk_brcm_570x_limit_vpd);
603DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
604 PCI_DEVICE_ID_NX2_5706S,
605 quirk_brcm_570x_limit_vpd);
606DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
607 PCI_DEVICE_ID_NX2_5708,
608 quirk_brcm_570x_limit_vpd);
609DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
610 PCI_DEVICE_ID_NX2_5708S,
611 quirk_brcm_570x_limit_vpd);
612DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
613 PCI_DEVICE_ID_NX2_5709,
614 quirk_brcm_570x_limit_vpd);
615DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
616 PCI_DEVICE_ID_NX2_5709S,
617 quirk_brcm_570x_limit_vpd);
618
/* Force the advertised VPD size to 8k on these Chelsio devices, bypassing
 * pci_vpd_size() auto-detection */
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
	pci_set_vpd_size(dev, 8192);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
#endif