// SPDX-License-Identifier: GPL-2.0
/*
 * PCI VPD support
 *
 * Copyright (C) 2010 Broadcom Corporation.
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include "pci.h"

/* VPD access through PCI 2.2+ VPD capability */

struct pci_vpd_ops {
        ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
        ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
        int (*set_size)(struct pci_dev *dev, size_t len);
};
struct pci_vpd {
        const struct pci_vpd_ops *ops;
        struct bin_attribute *attr;     /* Descriptor for sysfs VPD entry */
        struct mutex lock;              /* Serializes access to the VPD registers */
        unsigned int len;               /* Size of the VPD area, 0 if unusable */
        u16 flag;                       /* PCI_VPD_ADDR_F value expected on completion */
        u8 cap;                         /* Config space offset of the VPD capability */
        unsigned int busy:1;            /* A read or write is in flight */
        unsigned int valid:1;           /* len has been determined */
};

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to read
 * @buf: pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to write
 * @buf: buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);

/**
 * pci_set_vpd_size - Set size of Vital Product Data space
 * @dev: pci device struct
 * @len: size of vpd space
 */
int pci_set_vpd_size(struct pci_dev *dev, size_t len)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->set_size(dev, len);
}
EXPORT_SYMBOL(pci_set_vpd_size);
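
/*
 * Illustrative sketch of how a driver might use the exported helpers above
 * together with pci_vpd_find_tag()/pci_vpd_find_info_keyword() (defined
 * later in this file) to locate the Serial Number keyword.  This is not
 * taken from any in-tree driver; "pdev" and the 256-byte buffer size are
 * assumptions made purely for the example:
 *
 *      u8 buf[256];
 *      ssize_t len = pci_read_vpd(pdev, 0, sizeof(buf), buf);
 *      int ro, sn;
 *
 *      if (len < 0)
 *              return len;
 *      ro = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
 *      if (ro < 0)
 *              return ro;
 *      sn = pci_vpd_find_info_keyword(buf, ro + PCI_VPD_LRDT_TAG_SIZE,
 *                                     pci_vpd_lrdt_size(&buf[ro]),
 *                                     PCI_VPD_RO_KEYWORD_SERIALNO);
 *
 * On success "sn" indexes the "SN" field header and the field data starts
 * PCI_VPD_INFO_FLD_HDR_SIZE bytes later.
 */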

#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev: pci device struct
 * @old_size: current assumed size, also maximum allowed size
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
        size_t off = 0;
        unsigned char header[1+2];      /* 1 byte tag, 2 bytes length */

        while (off < old_size &&
               pci_read_vpd(dev, off, 1, header) == 1) {
                unsigned char tag;

                if (header[0] & PCI_VPD_LRDT) {
                        /* Large Resource Data Type Tag */
                        tag = pci_vpd_lrdt_tag(header);
                        /* Only read length from known tag items */
                        if ((tag == PCI_VPD_LTIN_ID_STRING) ||
                            (tag == PCI_VPD_LTIN_RO_DATA) ||
                            (tag == PCI_VPD_LTIN_RW_DATA)) {
                                if (pci_read_vpd(dev, off+1, 2,
                                                 &header[1]) != 2) {
                                        pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
                                                 tag, off + 1);
                                        return 0;
                                }
                                off += PCI_VPD_LRDT_TAG_SIZE +
                                        pci_vpd_lrdt_size(header);
                        }
                } else {
                        /* Short Resource Data Type Tag */
                        off += PCI_VPD_SRDT_TAG_SIZE +
                                pci_vpd_srdt_size(header);
                        tag = pci_vpd_srdt_tag(header);
                }

                if (tag == PCI_VPD_STIN_END)    /* End tag descriptor */
                        return off;

                if ((tag != PCI_VPD_LTIN_ID_STRING) &&
                    (tag != PCI_VPD_LTIN_RO_DATA) &&
                    (tag != PCI_VPD_LTIN_RW_DATA)) {
                        pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
                                 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
                                 tag, off);
                        return 0;
                }
        }
        return 0;
}
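
/*
 * For reference, a well-formed VPD image that this walker accepts looks like
 * (tag values per the PCI 2.2 VPD format; lengths are little-endian):
 *
 *      0x82 len_lo len_hi ...          Identifier String (large resource)
 *      0x90 len_lo len_hi ...          VPD-R read-only fields
 *      0x91 len_lo len_hi ...          VPD-W read-write fields
 *      0x78                            End tag (small resource, zero length)
 *
 * The value returned above is the offset just past the End tag, i.e. the
 * usable size; 0 means the format could not be parsed and VPD is unusable.
 */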

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
        struct pci_vpd *vpd = dev->vpd;
        unsigned long timeout = jiffies + msecs_to_jiffies(125);
        unsigned long max_sleep = 16;
        u16 status;
        int ret;

        if (!vpd->busy)
                return 0;

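        /*
         * Poll PCI_VPD_ADDR until flag bit F matches the value expected for
         * the pending operation: vpd->flag is PCI_VPD_ADDR_F while a read is
         * outstanding (the function sets F when the data is ready) and 0
         * while a write is outstanding (the function clears F when done).
         * Sleep 10us to max_sleep us per iteration, doubling max_sleep up to
         * ~1ms, and give up after roughly 125ms.
         */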
        do {
                ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                &status);
                if (ret < 0)
                        return ret;

                if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
                        vpd->busy = 0;
                        return 0;
                }

                if (fatal_signal_pending(current))
                        return -EINTR;

                if (time_after(jiffies, timeout))
                        break;

                usleep_range(10, max_sleep);
                if (max_sleep < 1024)
                        max_sleep *= 2;
        } while (true);

        pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
        return -ETIMEDOUT;
}

static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
                            void *arg)
{
        struct pci_vpd *vpd = dev->vpd;
        int ret;
        loff_t end = pos + count;
        u8 *buf = arg;

        if (pos < 0)
                return -EINVAL;

        if (!vpd->valid) {
                vpd->valid = 1;
                vpd->len = pci_vpd_size(dev, vpd->len);
        }

        if (vpd->len == 0)
                return -EIO;

        if (pos > vpd->len)
                return 0;

        if (end > vpd->len) {
                end = vpd->len;
                count = end - pos;
        }

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_wait(dev);
        if (ret < 0)
                goto out;

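        /*
         * Transfer one naturally aligned dword per iteration: write the
         * dword-aligned address with F cleared, wait for the function to
         * set F when the data is available, read four bytes from
         * PCI_VPD_DATA and copy out only those that fall inside [pos, end).
         */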
        while (pos < end) {
                u32 val;
                unsigned int i, skip;

                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos & ~3);
                if (ret < 0)
                        break;
                vpd->busy = 1;
                vpd->flag = PCI_VPD_ADDR_F;
                ret = pci_vpd_wait(dev);
                if (ret < 0)
                        break;

                ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
                if (ret < 0)
                        break;

                skip = pos & 3;
                for (i = 0; i < sizeof(u32); i++) {
                        if (i >= skip) {
                                *buf++ = val;
                                if (++pos == end)
                                        break;
                        }
                        val >>= 8;
                }
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
                             const void *arg)
{
        struct pci_vpd *vpd = dev->vpd;
        const u8 *buf = arg;
        loff_t end = pos + count;
        int ret = 0;

        if (pos < 0 || (pos & 3) || (count & 3))
                return -EINVAL;

        if (!vpd->valid) {
                vpd->valid = 1;
                vpd->len = pci_vpd_size(dev, vpd->len);
        }

        if (vpd->len == 0)
                return -EIO;

        if (end > vpd->len)
                return -EINVAL;

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_wait(dev);
        if (ret < 0)
                goto out;

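        /*
         * Writes are required to be dword sized and dword aligned (checked
         * above).  For each dword: assemble it little-endian from the
         * caller's buffer, load PCI_VPD_DATA, then write the address with F
         * set and wait for the function to clear F to signal completion.
         */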
        while (pos < end) {
                u32 val;

                val = *buf++;
                val |= *buf++ << 8;
                val |= *buf++ << 16;
                val |= *buf++ << 24;

                ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
                if (ret < 0)
                        break;
                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos | PCI_VPD_ADDR_F);
                if (ret < 0)
                        break;

                vpd->busy = 1;
                vpd->flag = 0;
                ret = pci_vpd_wait(dev);
                if (ret < 0)
                        break;

                pos += sizeof(u32);
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
        struct pci_vpd *vpd = dev->vpd;

        if (len == 0 || len > PCI_VPD_MAX_SIZE)
                return -EIO;

        vpd->valid = 1;
        vpd->len = len;

        return 0;
}

static const struct pci_vpd_ops pci_vpd_ops = {
        .read = pci_vpd_read,
        .write = pci_vpd_write,
        .set_size = pci_vpd_set_size,
};

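/*
 * Alternate ops installed when PCI_DEV_FLAGS_VPD_REF_F0 is set (see
 * quirk_f0_vpd_link() below): accesses on a non-zero function are forwarded
 * to function 0 of the same slot, which owns the shared VPD.
 */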
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
                               void *arg)
{
        struct pci_dev *tdev = pci_get_slot(dev->bus,
                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        ssize_t ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_read_vpd(tdev, pos, count, arg);
        pci_dev_put(tdev);
        return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
                                const void *arg)
{
        struct pci_dev *tdev = pci_get_slot(dev->bus,
                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        ssize_t ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_write_vpd(tdev, pos, count, arg);
        pci_dev_put(tdev);
        return ret;
}

static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
{
        struct pci_dev *tdev = pci_get_slot(dev->bus,
                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        int ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_set_vpd_size(tdev, len);
        pci_dev_put(tdev);
        return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
        .read = pci_vpd_f0_read,
        .write = pci_vpd_f0_write,
        .set_size = pci_vpd_f0_set_size,
};

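/*
 * Called during device enumeration: locate the PCI 2.2 VPD capability,
 * allocate the per-device state and select either the plain or the
 * function-0 redirect ops.  The length starts at the maximum and is
 * determined lazily on first access.
 */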
int pci_vpd_init(struct pci_dev *dev)
{
        struct pci_vpd *vpd;
        u8 cap;

        cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        if (!cap)
                return -ENODEV;

        vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
        if (!vpd)
                return -ENOMEM;

        vpd->len = PCI_VPD_MAX_SIZE;
        if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
                vpd->ops = &pci_vpd_f0_ops;
        else
                vpd->ops = &pci_vpd_ops;
        mutex_init(&vpd->lock);
        vpd->cap = cap;
        vpd->busy = 0;
        vpd->valid = 0;
        dev->vpd = vpd;
        return 0;
}

void pci_vpd_release(struct pci_dev *dev)
{
        kfree(dev->vpd);
}

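/*
 * sysfs "vpd" binary attribute handlers.  The attribute is registered below
 * with size 0 (unknown), so the clamping against bin_attr->size only applies
 * if a size is set later; otherwise bounds checking is left to
 * pci_read_vpd()/pci_write_vpd() via vpd->len.
 */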
static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *bin_attr, char *buf,
                             loff_t off, size_t count)
{
        struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));

        if (bin_attr->size > 0) {
                if (off > bin_attr->size)
                        count = 0;
                else if (count > bin_attr->size - off)
                        count = bin_attr->size - off;
        }

        return pci_read_vpd(dev, off, count, buf);
}

static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
                              struct bin_attribute *bin_attr, char *buf,
                              loff_t off, size_t count)
{
        struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));

        if (bin_attr->size > 0) {
                if (off > bin_attr->size)
                        count = 0;
                else if (count > bin_attr->size - off)
                        count = bin_attr->size - off;
        }

        return pci_write_vpd(dev, off, count, buf);
}

void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev)
{
        int retval;
        struct bin_attribute *attr;

        if (!dev->vpd)
                return;

        attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
        if (!attr)
                return;

        sysfs_bin_attr_init(attr);
        attr->size = 0;
        attr->attr.name = "vpd";
        attr->attr.mode = S_IRUSR | S_IWUSR;
        attr->read = read_vpd_attr;
        attr->write = write_vpd_attr;
        retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
        if (retval) {
                kfree(attr);
                return;
        }

        dev->vpd->attr = attr;
}

void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev)
{
        if (dev->vpd && dev->vpd->attr) {
                sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
                kfree(dev->vpd->attr);
        }
}

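/**
 * pci_vpd_find_tag - locate a Resource Data Type tag in a VPD image
 * @buf: VPD data to search
 * @off: offset at which to start searching
 * @len: length of the VPD data
 * @rdt: Resource Data Type tag to look for (for large resources this
 *       includes the PCI_VPD_LRDT bit, e.g. PCI_VPD_LRDT_RO_DATA)
 *
 * Returns the offset of the tag byte within @buf, or -ENOENT if the tag is
 * not found before the End tag or @len is reached.
 */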
int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
{
        int i;

        for (i = off; i < len; ) {
                u8 val = buf[i];

                if (val & PCI_VPD_LRDT) {
                        /* Don't return success if the tag isn't complete */
                        if (i + PCI_VPD_LRDT_TAG_SIZE > len)
                                break;

                        if (val == rdt)
                                return i;

                        i += PCI_VPD_LRDT_TAG_SIZE +
                                pci_vpd_lrdt_size(&buf[i]);
                } else {
                        u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;

                        if (tag == rdt)
                                return i;

                        if (tag == PCI_VPD_SRDT_END)
                                break;

                        i += PCI_VPD_SRDT_TAG_SIZE +
                                pci_vpd_srdt_size(&buf[i]);
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_tag);

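/**
 * pci_vpd_find_info_keyword - locate an information field by keyword
 * @buf: VPD data to search
 * @off: offset of the first information field (just past the VPD-R/VPD-W
 *       tag and its length bytes)
 * @len: length of the read-only or read-write section
 * @kw: two-character keyword, e.g. PCI_VPD_RO_KEYWORD_SERIALNO ("SN")
 *
 * Returns the offset of the matching field header within @buf, or -ENOENT.
 * The field data starts PCI_VPD_INFO_FLD_HDR_SIZE bytes after that offset.
 */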
int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
                              unsigned int len, const char *kw)
{
        int i;

        for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
                if (buf[i + 0] == kw[0] &&
                    buf[i + 1] == kw[1])
                        return i;

                i += PCI_VPD_INFO_FLD_HDR_SIZE +
                        pci_vpd_info_field_size(&buf[i]);
        }

        return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);

#ifdef CONFIG_PCI_QUIRKS
/*
 * Quirk non-zero PCI functions to route VPD access through function 0 for
 * devices that share VPD resources between functions. The functions are
 * expected to be identical devices.
 */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
        struct pci_dev *f0;

        if (!PCI_FUNC(dev->devfn))
                return;

        f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        if (!f0)
                return;

        if (f0->vpd && dev->class == f0->class &&
            dev->vendor == f0->vendor && dev->device == f0->device)
                dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;

        pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
                              PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);

/*
 * If a device follows the VPD format spec, the PCI core will not read or
 * write past the VPD End Tag. But some vendors do not follow the VPD
 * format spec, so we can't tell how much data is safe to access. Devices
 * may behave unpredictably if we access too much. Blacklist these devices
 * so we don't touch VPD at all.
 */
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
        if (dev->vpd) {
                dev->vpd->len = 0;
                pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
                        quirk_blacklist_vpd);
/*
 * The Amazon Annapurna Labs 0x0031 device ID is reused for other non-Root
 * Port device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI
 * class.
 */
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
                              PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);

/*
 * For Broadcom 5706, 5708, 5709 rev. A NICs, any read beyond the
 * VPD end tag will hang the device. This problem was initially
 * observed when a vpd entry was created in sysfs
 * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
 * will dump 32k of data. Reading a full 32k will cause an access
 * beyond the VPD end tag causing the device to hang. Once the device
 * is hung, the bnx2 driver will not be able to reset the device.
 * We believe that it is legal to read beyond the end tag and
 * therefore the solution is to limit the read/write length.
 */
static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
{
        /*
         * Only limit the VPD length for 5706, 5706S, 5708,
         * 5708S and 5709 rev. A
         */
        if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
            (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
            (dev->device == PCI_DEVICE_ID_NX2_5708) ||
            (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
            ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
             (dev->revision & 0xf0) == 0x0)) {
                if (dev->vpd)
                        dev->vpd->len = 0x80;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
                        PCI_DEVICE_ID_NX2_5706,
                        quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
                        PCI_DEVICE_ID_NX2_5706S,
                        quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
                        PCI_DEVICE_ID_NX2_5708,
                        quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
                        PCI_DEVICE_ID_NX2_5708S,
                        quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
                        PCI_DEVICE_ID_NX2_5709,
                        quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
                        PCI_DEVICE_ID_NX2_5709S,
                        quirk_brcm_570x_limit_vpd);

static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
        int chip = (dev->device & 0xf000) >> 12;
        int func = (dev->device & 0x0f00) >> 8;
        int prod = (dev->device & 0x00ff) >> 0;

        /*
         * If this is a T3-based adapter, there's a 1KB VPD area at offset
         * 0xc00 which contains the preferred VPD values. If this is a T4 or
         * later based adapter, the special VPD is at offset 0x400 for the
         * Physical Functions (the SR-IOV Virtual Functions have no VPD
         * Capabilities). The PCI VPD Access core routines will normally
         * compute the size of the VPD by parsing the VPD Data Structure at
         * offset 0x000. This will result in silent failures when attempting
         * to access these other VPD areas which are beyond those computed
         * limits.
         */
        if (chip == 0x0 && prod >= 0x20)
                pci_set_vpd_size(dev, 8192);
        else if (chip >= 0x4 && func < 0x8)
                pci_set_vpd_size(dev, 2048);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
                        quirk_chelsio_extend_vpd);
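
/*
 * Worked example of the decode above (device IDs chosen purely for
 * illustration): 0x5401 gives chip = 0x5, func = 0x4, prod = 0x01, so
 * "chip >= 0x4 && func < 0x8" selects the 2048-byte size; 0x0030 gives
 * chip = 0x0, prod = 0x30 >= 0x20, selecting 8192 bytes.
 */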

#endif