// SPDX-License-Identifier: GPL-2.0
/*
 * PCI VPD support
 *
 * Copyright (C) 2010 Broadcom Corporation.
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include "pci.h"

/* VPD access through PCI 2.2+ VPD capability */

struct pci_vpd_ops {
	ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
	ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
};

struct pci_vpd {
	const struct pci_vpd_ops *ops;
	struct mutex	lock;
	unsigned int	len;
	u16		flag;
	u8		cap;
	unsigned int	busy:1;
	unsigned int	valid:1;
};

static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
{
	return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to read
 * @buf: pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to write
 * @buf: buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
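
/*
 * Illustrative use of the accessors above (a hedged sketch, not part of
 * the kernel sources): a driver would typically read a chunk of VPD into
 * a local buffer and parse it.  The function name and buffer size below
 * are invented for the example.
 */
#if 0	/* example only, not compiled */
static void example_dump_vpd(struct pci_dev *pdev)
{
	u8 buf[64];
	ssize_t len;

	/* read the first 64 bytes of VPD, if the device exposes any */
	len = pci_read_vpd(pdev, 0, sizeof(buf), buf);
	if (len < 0)
		pci_warn(pdev, "VPD read failed: %zd\n", len);
	else
		print_hex_dump_bytes("vpd: ", DUMP_PREFIX_OFFSET, buf, len);
}
#endif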

#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev: pci device struct
 * @old_size: current assumed size, also maximum allowed size
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (off == 0 && (header[0] == 0x00 || header[0] == 0xff))
			goto error;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					pci_warn(dev, "failed VPD read at offset %zu\n",
						 off + 1);
					return 0;
				}
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			} else {
				pci_warn(dev, "invalid large VPD tag %02x at offset %zu\n",
					 tag, off);
				return 0;
			}
		} else {
			/* Short Resource Data Type Tag */
			off += PCI_VPD_SRDT_TAG_SIZE +
			       pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
			if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
				return off;
		}
	}
	return 0;

error:
	pci_info(dev, "invalid VPD tag %#04x at offset %zu%s\n",
		 header[0], off, off == 0 ?
		 "; assume missing optional EEPROM" : "");
	return 0;
}
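
/*
 * For reference, a minimal well-formed VPD image that pci_vpd_size()
 * would walk looks like this (example values, following the PCI 2.2 VPD
 * format; a real read-only section would also carry an "RV" checksum
 * field, omitted here for brevity):
 *
 *   0x00: 82 09 00        large resource, ID string, length 9
 *   0x03: "Example 9"     identifier string
 *   0x0c: 90 06 00        large resource, read-only data, length 6
 *   0x0f: 53 4e 03 "123"  "SN" keyword, 3 bytes of data
 *   0x15: 78              short resource, end tag
 *
 * pci_vpd_size() returns 0x16 for this image: the offset just past the
 * end tag.
 */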

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
	struct pci_vpd *vpd = dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(125);
	unsigned long max_sleep = 16;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	do {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		if (fatal_signal_pending(current))
			return -EINTR;

		if (time_after(jiffies, timeout))
			break;

		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;
	} while (true);

	pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}

static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
};

static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
			       void *arg)
{
	struct pci_dev *tdev = pci_get_func0_dev(dev);
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_read_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
				const void *arg)
{
	struct pci_dev *tdev = pci_get_func0_dev(dev);
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_write_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
};

void pci_vpd_init(struct pci_dev *dev)
{
	struct pci_vpd *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return;

	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return;

	vpd->len = PCI_VPD_MAX_SIZE;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
		vpd->ops = &pci_vpd_f0_ops;
	else
		vpd->ops = &pci_vpd_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = 0;
	vpd->valid = 0;
	dev->vpd = vpd;
}

void pci_vpd_release(struct pci_dev *dev)
{
	kfree(dev->vpd);
}

static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
			struct bin_attribute *bin_attr, char *buf, loff_t off,
			size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));

	return pci_read_vpd(dev, off, count, buf);
}

static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *bin_attr, char *buf, loff_t off,
			 size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));

	return pci_write_vpd(dev, off, count, buf);
}
static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);

static struct bin_attribute *vpd_attrs[] = {
	&bin_attr_vpd,
	NULL,
};

static umode_t vpd_attr_is_visible(struct kobject *kobj,
				   struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pdev->vpd)
		return 0;

	return a->attr.mode;
}

const struct attribute_group pci_dev_vpd_attr_group = {
	.bin_attrs = vpd_attrs,
	.is_bin_visible = vpd_attr_is_visible,
};

int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
{
	int i = 0;

	/* look for LRDT tags only, end tag is the only SRDT tag */
	while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
		if (buf[i] == rdt)
			return i;

		i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_tag);

int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
			      unsigned int len, const char *kw)
{
	int i;

	for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
		if (buf[i + 0] == kw[0] &&
		    buf[i + 1] == kw[1])
			return i;

		i += PCI_VPD_INFO_FLD_HDR_SIZE +
		     pci_vpd_info_field_size(&buf[i]);
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
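
/*
 * Putting the two helpers together (a sketch under assumptions, not part
 * of the kernel sources): a driver that wants the read-only serial number
 * might do something like the following.  The function name and 256-byte
 * buffer are invented for the example; "SN" is the standard read-only
 * serial-number keyword.
 */
#if 0	/* example only, not compiled */
static int example_read_serial(struct pci_dev *pdev, char *sn, size_t sn_len)
{
	u8 vpd[256];
	int ro, kw, size;
	ssize_t len;

	len = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
	if (len < 0)
		return len;

	/* locate the read-only large resource... */
	ro = pci_vpd_find_tag(vpd, len, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;

	/* ...then the "SN" keyword within it */
	kw = pci_vpd_find_info_keyword(vpd, ro + PCI_VPD_LRDT_TAG_SIZE,
				       pci_vpd_lrdt_size(&vpd[ro]), "SN");
	if (kw < 0)
		return kw;

	size = pci_vpd_info_field_size(&vpd[kw]);
	if (size + 1 > sn_len)
		return -ENOSPC;

	memcpy(sn, &vpd[kw + PCI_VPD_INFO_FLD_HDR_SIZE], size);
	sn[size] = '\0';
	return 0;
}
#endif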

#ifdef CONFIG_PCI_QUIRKS
/*
 * Quirk non-zero PCI functions to route VPD access through function 0 for
 * devices that share VPD resources between functions. The functions are
 * expected to be identical devices.
 */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
	struct pci_dev *f0;

	if (!PCI_FUNC(dev->devfn))
		return;

	f0 = pci_get_func0_dev(dev);
	if (!f0)
		return;

	if (f0->vpd && dev->class == f0->class &&
	    dev->vendor == f0->vendor && dev->device == f0->device)
		dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;

	pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);

/*
 * If a device follows the VPD format spec, the PCI core will not read or
 * write past the VPD End Tag. But some vendors do not follow the VPD
 * format spec, so we can't tell how much data is safe to access. Devices
 * may behave unpredictably if we access too much. Blacklist these devices
 * so we don't touch VPD at all.
 */
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
	if (dev->vpd) {
		dev->vpd->len = 0;
		pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
			quirk_blacklist_vpd);
/*
 * The Amazon Annapurna Labs 0x0031 device ID is reused for other non-Root
 * Port device types, so the quirk is registered for the
 * PCI_CLASS_BRIDGE_PCI class.
 */
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);

static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
	struct pci_vpd *vpd = dev->vpd;

	if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
		return;

	vpd->valid = 1;
	vpd->len = len;
}

static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
	int chip = (dev->device & 0xf000) >> 12;
	int func = (dev->device & 0x0f00) >> 8;
	int prod = (dev->device & 0x00ff) >> 0;

	/*
	 * If this is a T3-based adapter, there's a 1KB VPD area at offset
	 * 0xc00 which contains the preferred VPD values. If this is a T4 or
	 * later based adapter, the special VPD is at offset 0x400 for the
	 * Physical Functions (the SR-IOV Virtual Functions have no VPD
	 * Capabilities). The PCI VPD Access core routines will normally
	 * compute the size of the VPD by parsing the VPD Data Structure at
	 * offset 0x000. This will result in silent failures when attempting
	 * to access these other VPD areas, which are beyond those computed
	 * limits.
	 */
	if (chip == 0x0 && prod >= 0x20)
		pci_vpd_set_size(dev, 8192);
	else if (chip >= 0x4 && func < 0x8)
		pci_vpd_set_size(dev, 2048);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
			quirk_chelsio_extend_vpd);

#endif