blob: 7c3a097379bb7182262483ebf35ea2ef50aedd17 [file] [log] [blame]
Bjorn Helgaas7328c8f2018-01-26 11:45:16 -06001// SPDX-License-Identifier: GPL-2.0
Matt Carlsonb55ac1b2010-02-26 14:04:41 +00002/*
Bjorn Helgaasdf62ab52018-03-09 16:36:33 -06003 * PCI VPD support
Matt Carlsonb55ac1b2010-02-26 14:04:41 +00004 *
5 * Copyright (C) 2010 Broadcom Corporation.
6 */
7
8#include <linux/pci.h>
Bjorn Helgaasf0eb77a2018-03-19 13:06:11 -05009#include <linux/delay.h>
Paul Gortmaker363c75d2011-05-27 09:37:25 -040010#include <linux/export.h>
Bjorn Helgaasf0eb77a2018-03-19 13:06:11 -050011#include <linux/sched/signal.h>
12#include "pci.h"
13
14/* VPD access through PCI 2.2+ VPD capability */
15
Heiner Kallweit5881b382021-04-16 21:52:07 +020016static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
17{
18 return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
19}
20
/* VPD address register covers PCI_VPD_ADDR_MASK bytes, hence this hard cap */
#define PCI_VPD_MAX_SIZE	(PCI_VPD_ADDR_MASK + 1)
/* Sentinel meaning "VPD unusable"; checked in pci_vpd_init() */
#define PCI_VPD_SZ_INVALID	UINT_MAX

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev: pci device struct
 */
static size_t pci_vpd_size(struct pci_dev *dev)
{
	size_t off = 0, size;
	unsigned char tag, header[1+2];	/* 1 byte tag, 2 bytes length */

	/* Otherwise the following reads would fail. */
	dev->vpd.len = PCI_VPD_MAX_SIZE;

	/* Walk the resource list one tag at a time until the End tag */
	while (pci_read_vpd(dev, off, 1, header) == 1) {
		size = 0;

		/* All-zeros/all-ones first byte: no EEPROM behind the cap */
		if (off == 0 && (header[0] == 0x00 || header[0] == 0xff))
			goto error;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			/* LRDT carries a 2-byte little-endian length field */
			if (pci_read_vpd(dev, off + 1, 2, &header[1]) != 2) {
				pci_warn(dev, "failed VPD read at offset %zu\n",
					 off + 1);
				/* keep what was parsed so far, if anything */
				return off ?: PCI_VPD_SZ_INVALID;
			}
			size = pci_vpd_lrdt_size(header);
			if (off + size > PCI_VPD_MAX_SIZE)
				goto error;

			off += PCI_VPD_LRDT_TAG_SIZE + size;
		} else {
			/* Short Resource Data Type Tag */
			/* SRDT: tag and 3-bit length share the single byte */
			tag = pci_vpd_srdt_tag(header);
			size = pci_vpd_srdt_size(header);
			if (off + size > PCI_VPD_MAX_SIZE)
				goto error;

			off += PCI_VPD_SRDT_TAG_SIZE + size;
			if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
				return off;
		}
	}
	return off;

error:
	pci_info(dev, "invalid VPD tag %#04x (size %zu) at offset %zu%s\n",
		 header[0], size, off, off == 0 ?
		 "; assume missing optional EEPROM" : "");
	return off ?: PCI_VPD_SZ_INVALID;
}
74
/**
 * pci_vpd_wait - wait for the last VPD operation to complete
 * @dev: PCI device whose VPD capability is being polled
 * @set: if true wait for flag to be set, else wait for it to be cleared
 *
 * This code has to spin since there is no other notification from the PCI
 * hardware.  Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.  Polls PCI_VPD_ADDR_F
 * with exponential backoff (10us..1024us) up to a 125ms deadline.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev, bool set)
{
	struct pci_vpd *vpd = &dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(125);
	unsigned long max_sleep = 16;
	u16 status;
	int ret;

	do {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		/* Flag reached the requested state? Then we're done. */
		if (!!(status & PCI_VPD_ADDR_F) == set)
			return 0;

		if (time_after(jiffies, timeout))
			break;

		/* Back off: double the sleep ceiling each round up to 1024us */
		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;
	} while (true);

	pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}
112
/*
 * Read @count bytes of VPD starting at @pos into @arg via the VPD
 * capability's ADDR/DATA register pair.  Reads are clamped to the probed
 * VPD length.  Returns bytes read, or a negative errno.
 */
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = &dev->vpd;
	int ret = 0;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (!vpd->cap)
		return -ENODEV;

	if (pos < 0)
		return -EINVAL;

	/* Nothing to read beyond the end of VPD space */
	if (pos > vpd->len)
		return 0;

	/* Clamp the request to the known VPD size */
	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* EEPROM access is slow; let a dying task bail out early */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/*
		 * Writing the dword-aligned address with flag bit clear
		 * starts a read cycle; hardware sets PCI_VPD_ADDR_F when
		 * the data register is valid.
		 */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		ret = pci_vpd_wait(dev, true);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy only the bytes inside [pos, end) from the dword */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}

	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
173
/*
 * Write @count bytes from @arg to VPD starting at @pos.  Both @pos and
 * @count must be dword-aligned since the hardware transfers whole dwords.
 * Returns bytes written, or a negative errno.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = &dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (!vpd->cap)
		return -ENODEV;

	/* Hardware only supports aligned full-dword writes */
	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* Unlike reads, writes past the end are rejected, not clamped */
	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	while (pos < end) {
		u32 val;

		/* Assemble a little-endian dword from the source buffer */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		/*
		 * Load the data register first, then write the address with
		 * PCI_VPD_ADDR_F set to start the write cycle; hardware
		 * clears the flag when the write completes.
		 */
		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		ret = pci_vpd_wait(dev, false);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}

	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
220
/*
 * Locate the device's VPD capability and determine the VPD size.
 * Called during device enumeration.  A quirk may have pre-set vpd.len
 * (fixed size or PCI_VPD_SZ_INVALID), in which case probing is skipped;
 * an invalid size disables VPD access entirely by clearing vpd.cap.
 */
void pci_vpd_init(struct pci_dev *dev)
{
	dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	mutex_init(&dev->vpd.lock);

	/* Respect a length already set by a quirk */
	if (!dev->vpd.len)
		dev->vpd.len = pci_vpd_size(dev);

	if (dev->vpd.len == PCI_VPD_SZ_INVALID)
		dev->vpd.cap = 0;
}
Matt Carlsonb55ac1b2010-02-26 14:04:41 +0000232
Bjorn Helgaas07b45232021-04-28 13:32:53 -0500233static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
234 struct bin_attribute *bin_attr, char *buf, loff_t off,
235 size_t count)
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500236{
237 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
238
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500239 return pci_read_vpd(dev, off, count, buf);
240}
241
Bjorn Helgaas07b45232021-04-28 13:32:53 -0500242static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
243 struct bin_attribute *bin_attr, char *buf, loff_t off,
244 size_t count)
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500245{
246 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
247
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500248 return pci_write_vpd(dev, off, count, buf);
249}
/* Binary sysfs attribute "vpd", root read/write only (mode 0600) */
static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);

/* NULL-terminated list consumed by pci_dev_vpd_attr_group below */
static struct bin_attribute *vpd_attrs[] = {
	&bin_attr_vpd,
	NULL,
};
256
257static umode_t vpd_attr_is_visible(struct kobject *kobj,
258 struct bin_attribute *a, int n)
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500259{
Krzysztof Wilczyńskid93f8392021-04-16 20:58:40 +0000260 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500261
Heiner Kallweitfd00faa2021-08-08 19:21:56 +0200262 if (!pdev->vpd.cap)
Krzysztof Wilczyńskid93f8392021-04-16 20:58:40 +0000263 return 0;
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500264
Krzysztof Wilczyńskid93f8392021-04-16 20:58:40 +0000265 return a->attr.mode;
Bjorn Helgaasb1c615c2018-03-19 13:06:17 -0500266}
267
/* Attribute group registered on every PCI device; visibility gated above */
const struct attribute_group pci_dev_vpd_attr_group = {
	.bin_attrs = vpd_attrs,
	.is_bin_visible = vpd_attr_is_visible,
};
Matt Carlsonb55ac1b2010-02-26 14:04:41 +0000272
/**
 * pci_vpd_alloc - allocate a buffer and read the device's entire VPD into it
 * @dev: PCI device struct
 * @size: if non-NULL, the VPD length in bytes is stored here on success
 *
 * Returns a kmalloc'd buffer holding the full VPD (caller must kfree()),
 * or an ERR_PTR: -ENODEV if the device has no VPD capability, -ENOMEM on
 * allocation failure, -EIO on a short or failed read.
 */
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
{
	unsigned int len = dev->vpd.len;
	void *buf;
	int cnt;

	if (!dev->vpd.cap)
		return ERR_PTR(-ENODEV);

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Anything short of the full length (including errors) is a failure */
	cnt = pci_read_vpd(dev, 0, len, buf);
	if (cnt != len) {
		kfree(buf);
		return ERR_PTR(-EIO);
	}

	if (size)
		*size = len;

	return buf;
}
EXPORT_SYMBOL_GPL(pci_vpd_alloc);
298
Heiner Kallweit4cf0abb2021-04-01 18:43:15 +0200299int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
Matt Carlsonb55ac1b2010-02-26 14:04:41 +0000300{
Heiner Kallweit0a08bc02021-04-01 18:44:15 +0200301 int i = 0;
Matt Carlsonb55ac1b2010-02-26 14:04:41 +0000302
Heiner Kallweit0a08bc02021-04-01 18:44:15 +0200303 /* look for LRDT tags only, end tag is the only SRDT tag */
304 while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
305 if (buf[i] == rdt)
306 return i;
Matt Carlsonb55ac1b2010-02-26 14:04:41 +0000307
Heiner Kallweit0a08bc02021-04-01 18:44:15 +0200308 i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
Matt Carlsonb55ac1b2010-02-26 14:04:41 +0000309 }
310
311 return -ENOENT;
312}
313EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
Matt Carlson4067a852010-02-26 14:04:43 +0000314
315int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
316 unsigned int len, const char *kw)
317{
318 int i;
319
320 for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
321 if (buf[i + 0] == kw[0] &&
322 buf[i + 1] == kw[1])
323 return i;
324
325 i += PCI_VPD_INFO_FLD_HDR_SIZE +
326 pci_vpd_info_field_size(&buf[i]);
327 }
328
329 return -ENOENT;
330}
331EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
Bjorn Helgaas99605852018-03-19 13:06:24 -0500332
Heiner Kallweitd27f7342021-08-08 19:19:10 +0200333/**
334 * pci_read_vpd - Read one entry from Vital Product Data
335 * @dev: PCI device struct
336 * @pos: offset in VPD space
337 * @count: number of bytes to read
338 * @buf: pointer to where to store result
339 */
340ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
341{
Heiner Kallweita38fccd2021-08-08 19:20:05 +0200342 ssize_t ret;
343
344 if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
345 dev = pci_get_func0_dev(dev);
346 if (!dev)
347 return -ENODEV;
348
349 ret = pci_vpd_read(dev, pos, count, buf);
350 pci_dev_put(dev);
351 return ret;
352 }
353
354 return pci_vpd_read(dev, pos, count, buf);
Heiner Kallweitd27f7342021-08-08 19:19:10 +0200355}
356EXPORT_SYMBOL(pci_read_vpd);
357
358/**
359 * pci_write_vpd - Write entry to Vital Product Data
360 * @dev: PCI device struct
361 * @pos: offset in VPD space
362 * @count: number of bytes to write
363 * @buf: buffer containing write data
364 */
365ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
366{
Heiner Kallweita38fccd2021-08-08 19:20:05 +0200367 ssize_t ret;
368
369 if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
370 dev = pci_get_func0_dev(dev);
371 if (!dev)
372 return -ENODEV;
373
374 ret = pci_vpd_write(dev, pos, count, buf);
375 pci_dev_put(dev);
376 return ret;
377 }
378
379 return pci_vpd_write(dev, pos, count, buf);
Heiner Kallweitd27f7342021-08-08 19:19:10 +0200380}
381EXPORT_SYMBOL(pci_write_vpd);
382
Bjorn Helgaas99605852018-03-19 13:06:24 -0500383#ifdef CONFIG_PCI_QUIRKS
384/*
385 * Quirk non-zero PCI functions to route VPD access through function 0 for
386 * devices that share VPD resources between functions. The functions are
387 * expected to be identical devices.
388 */
389static void quirk_f0_vpd_link(struct pci_dev *dev)
390{
391 struct pci_dev *f0;
392
393 if (!PCI_FUNC(dev->devfn))
394 return;
395
Heiner Kallweit5881b382021-04-16 21:52:07 +0200396 f0 = pci_get_func0_dev(dev);
Bjorn Helgaas99605852018-03-19 13:06:24 -0500397 if (!f0)
398 return;
399
Heiner Kallweitfd00faa2021-08-08 19:21:56 +0200400 if (f0->vpd.cap && dev->class == f0->class &&
Bjorn Helgaas99605852018-03-19 13:06:24 -0500401 dev->vendor == f0->vendor && dev->device == f0->device)
402 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
403
404 pci_dev_put(f0);
405}
406DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
407 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
408
/*
 * If a device follows the VPD format spec, the PCI core will not read or
 * write past the VPD End Tag.  But some vendors do not follow the VPD
 * format spec, so we can't tell how much data is safe to access.  Devices
 * may behave unpredictably if we access too much.  Blacklist these devices
 * so we don't touch VPD at all.
 */
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
	/* PCI_VPD_SZ_INVALID makes pci_vpd_init() clear vpd.cap */
	dev->vpd.len = PCI_VPD_SZ_INVALID;
	pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd);
/*
 * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
 * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
 */
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
			       PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
Bjorn Helgaas99605852018-03-19 13:06:24 -0500439
/* Chelsio encodes chip generation/function/product in the PCI device ID */
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
	int chip = (dev->device & 0xf000) >> 12;
	int func = (dev->device & 0x0f00) >> 8;
	int prod = (dev->device & 0x00ff) >> 0;

	/*
	 * If this is a T3-based adapter, there's a 1KB VPD area at offset
	 * 0xc00 which contains the preferred VPD values.  If this is a T4 or
	 * later based adapter, the special VPD is at offset 0x400 for the
	 * Physical Functions (the SR-IOV Virtual Functions have no VPD
	 * Capabilities).  The PCI VPD Access core routines will normally
	 * compute the size of the VPD by parsing the VPD Data Structure at
	 * offset 0x000.  This will result in silent failures when attempting
	 * to accesses these other VPD areas which are beyond those computed
	 * limits.
	 */
	if (chip == 0x0 && prod >= 0x20)
		dev->vpd.len = 8192;	/* T3: cover the 1KB area at 0xc00 */
	else if (chip >= 0x4 && func < 0x8)
		dev->vpd.len = 2048;	/* T4+ PF: cover the area at 0x400 */
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
			 quirk_chelsio_extend_vpd);
Linus Torvalds3c0d5512018-04-06 18:31:06 -0700465
Bjorn Helgaas99605852018-03-19 13:06:24 -0500466#endif