// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
# define pci_lock_config(f)     do { (void)(f); } while (0)
# define pci_unlock_config(f)   do { (void)(f); } while (0)
#else
# define pci_lock_config(f)     raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)   raw_spin_unlock_irqrestore(&pci_lock, f)
#endif

#define PCI_OP_READ(size, type, len) \
int noinline pci_bus_read_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
        int res; \
        unsigned long flags; \
        u32 data = 0; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        pci_lock_config(flags); \
        res = bus->ops->read(bus, devfn, pos, len, &data); \
        *value = (type)data; \
        pci_unlock_config(flags); \
        return res; \
}

#define PCI_OP_WRITE(size, type, len) \
int noinline pci_bus_write_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
        int res; \
        unsigned long flags; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        pci_lock_config(flags); \
        res = bus->ops->write(bus, devfn, pos, len, value); \
        pci_unlock_config(flags); \
        return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
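
/*
 * Illustrative sketch (editor-added, not part of this file): how the
 * macro-generated accessors above are used.  The expansion of
 * PCI_OP_READ(word, u16, 2) yields pci_bus_read_config_word(), which
 * checks alignment, takes pci_lock (unless CONFIG_PCI_LOCKLESS_CONFIG)
 * and forwards to bus->ops->read() with len == 2.
 */
static int __maybe_unused example_read_vendor_id(struct pci_bus *bus,
                                                 unsigned int devfn,
                                                 u16 *vendor)
{
        /* PCI_VENDOR_ID is 2-byte aligned, so the PCI_word_BAD check passes */
        return pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, vendor);
}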

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
                            int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        if (size == 1)
                *val = readb(addr);
        else if (size == 2)
                *val = readw(addr);
        else
                *val = readl(addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);
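
/*
 * Illustrative sketch (editor-added): a hypothetical host controller
 * driver can implement only ->map_bus() and reuse the generic
 * accessors above for ->read()/->write().  "foo_pcie", "foo_map_bus"
 * and the ECAM-style offset math are assumptions for illustration.
 */
struct foo_pcie {
        void __iomem *cfg_base;
};

static void __iomem *foo_map_bus(struct pci_bus *bus, unsigned int devfn,
                                 int where)
{
        struct foo_pcie *pcie = bus->sysdata;

        /* ECAM-style layout: 1 MiB per bus, 4 KiB per function */
        return pcie->cfg_base + (bus->number << 20) + (devfn << 12) + where;
}

static struct pci_ops foo_pci_ops __maybe_unused = {
        .map_bus = foo_map_bus,
        .read    = pci_generic_config_read,
        .write   = pci_generic_config_write,
};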

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
                             int where, int size, u32 val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 1)
                writeb(val, addr);
        else if (size == 2)
                writew(val, addr);
        else
                writel(val, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
                              int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        *val = readl(addr);

        if (size <= 2)
                *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);
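
/*
 * Worked example (editor-added) of the extraction above: a 2-byte read
 * of PCI_STATUS (where == 0x06) maps to the aligned dword at 0x04.
 * With where & 3 == 2 and size == 2:
 *
 *     *val = (dword >> (8 * 2)) & ((1 << 16) - 1)
 *          = (dword >> 16) & 0xffff
 *
 * i.e. the upper half of the 32-bit word at offset 0x04, which is the
 * Status register.
 */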

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
                               int where, int size, u32 val)
{
        void __iomem *addr;
        u32 mask, tmp;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 4) {
                writel(val, addr);
                return PCIBIOS_SUCCESSFUL;
        }

        /*
         * In general, hardware that supports only 32-bit writes on PCI is
         * not spec-compliant.  For example, software may perform a 16-bit
         * write.  If the hardware only supports 32-bit accesses, we must
         * do a 32-bit read, merge in the 16 bits we intend to write,
         * followed by a 32-bit write.  If the 16 bits we *don't* intend to
         * write happen to have any RW1C (write-one-to-clear) bits set, we
         * just inadvertently cleared something we shouldn't have.
         */
        dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
                             size, pci_domain_nr(bus), bus->number,
                             PCI_SLOT(devfn), PCI_FUNC(devfn), where);

        mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
        tmp = readl(addr) & mask;
        tmp |= val << ((where & 0x3) * 8);
        writel(tmp, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
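
/*
 * Worked example (editor-added) of the read-modify-write above: a
 * 2-byte write to PCI_COMMAND (where == 0x04, so where & 3 == 0):
 *
 *     mask = ~(0xffff << 0) = 0xffff0000
 *     tmp  = readl(addr) & mask;     // keeps the current Status bits
 *     tmp |= val << 0;               // merges in the new Command value
 *     writel(tmp, addr);
 *
 * The Status register shares this dword and is largely RW1C, so any
 * status bit that read back as 1 is written back as 1 and thereby
 * cleared, which is exactly the hazard the warning above is about.
 */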

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
        struct pci_ops *old_ops;
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);
        old_ops = bus->ops;
        bus->ops = ops;
        raw_spin_unlock_irqrestore(&pci_lock, flags);
        return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
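
/*
 * Illustrative sketch (editor-added): pci_bus_set_ops() swaps the
 * accessors for a whole bus and hands back the old ones, so a caller
 * that wants to intercept config accesses (e.g. for error injection)
 * can wrap and later restore them.  "intercept_ops" is assumed to be
 * supplied by the caller.
 */
static void __maybe_unused example_swap_ops(struct pci_bus *bus,
                                            struct pci_ops *intercept_ops)
{
        struct pci_ops *saved;

        saved = pci_bus_set_ops(bus, intercept_ops);
        /* ... config accesses on this bus now go through intercept_ops ... */
        pci_bus_set_ops(bus, saved);
}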

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
        __must_hold(&pci_lock)
{
        do {
                raw_spin_unlock_irq(&pci_lock);
                wait_event(pci_cfg_wait, !dev->block_cfg_access);
                raw_spin_lock_irq(&pci_lock);
        } while (dev->block_cfg_access);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
        (struct pci_dev *dev, int pos, type *val) \
{ \
        int ret = PCIBIOS_SUCCESSFUL; \
        u32 data = -1; \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->read(dev->bus, dev->devfn, \
                                  pos, sizeof(type), &data); \
        raw_spin_unlock_irq(&pci_lock); \
        *val = (type)data; \
        return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type) \
int pci_user_write_config_##size \
        (struct pci_dev *dev, int pos, type val) \
{ \
        int ret = PCIBIOS_SUCCESSFUL; \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->write(dev->bus, dev->devfn, \
                                   pos, sizeof(type), val); \
        raw_spin_unlock_irq(&pci_lock); \
        return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
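
/*
 * Illustrative sketch (editor-added): the pci_user_*() variants are the
 * accessors used on paths that originate from user space (sysfs/proc
 * config files).  Unlike the pci_bus_*() wrappers they sleep while a
 * device has config access blocked and return 0 or a negative errno
 * rather than a PCIBIOS_* code.
 */
static int __maybe_unused example_user_read_command(struct pci_dev *dev,
                                                    u16 *cmd)
{
        return pci_user_read_config_word(dev, PCI_COMMAND, cmd); /* 0 or -errno */
}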

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
        might_sleep();

        raw_spin_lock_irq(&pci_lock);
        if (dev->block_cfg_access)
                pci_wait_cfg(dev);
        dev->block_cfg_access = 1;
        raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
        unsigned long flags;
        bool locked = true;

        raw_spin_lock_irqsave(&pci_lock, flags);
        if (dev->block_cfg_access)
                locked = false;
        else
                dev->block_cfg_access = 1;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);

        /*
         * This indicates a problem in the caller, but we don't need
         * to kill them, unlike a double-block above.
         */
        WARN_ON(!dev->block_cfg_access);

        dev->block_cfg_access = 0;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
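
/*
 * Illustrative sketch (editor-added): the usual pattern around an
 * operation during which config space must not be touched by user
 * space, e.g. a function reset or a D-state transition.  In atomic
 * context, pci_cfg_access_trylock() is the non-sleeping alternative.
 */
static void __maybe_unused example_blocked_region(struct pci_dev *dev)
{
        pci_cfg_access_lock(dev);               /* may sleep */
        /* ... reset or power-transition the device here ... */
        pci_cfg_access_unlock(dev);             /* wakes any waiters */
}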

static inline int pcie_cap_version(const struct pci_dev *dev)
{
        return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ENDPOINT ||
               type == PCI_EXP_TYPE_LEG_END ||
               type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_UPSTREAM ||
               type == PCI_EXP_TYPE_DOWNSTREAM ||
               type == PCI_EXP_TYPE_PCI_BRIDGE ||
               type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
        return pcie_downstream_port(dev) &&
               pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
        if (!pci_is_pcie(dev))
                return false;

        switch (pos) {
        case PCI_EXP_FLAGS:
                return true;
        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVSTA:
                return true;
        case PCI_EXP_LNKCAP:
        case PCI_EXP_LNKCTL:
        case PCI_EXP_LNKSTA:
                return pcie_cap_has_lnkctl(dev);
        case PCI_EXP_SLTCAP:
        case PCI_EXP_SLTCTL:
        case PCI_EXP_SLTSTA:
                return pcie_cap_has_sltctl(dev);
        case PCI_EXP_RTCTL:
        case PCI_EXP_RTCAP:
        case PCI_EXP_RTSTA:
                return pcie_cap_has_rtctl(dev);
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
        case PCI_EXP_LNKSTA2:
                return pcie_cap_version(dev) > 1;
        default:
                return false;
        }
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
        int ret;

        *val = 0;
        if (pos & 1)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_word() fails; it
                 * may have been set to 0xFFFF if a hardware error
                 * occurred during the read.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        /*
         * For Functions that do not implement the Slot Capabilities,
         * Slot Status, and Slot Control registers, these spaces must
         * be hardwired to 0b, with the exception of the Presence Detect
         * State bit in the Slot Status register of Downstream Ports,
         * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
         */
        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
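
/*
 * Illustrative sketch (editor-added): reading a PCI Express Capability
 * register through the checked accessor above.  If the register is not
 * implemented for this device type, *val is left at 0 (or set to
 * PCI_EXP_SLTSTA_PDS for Slot Status on downstream ports) and 0 is
 * returned, so callers need no special-casing.
 */
static int __maybe_unused example_read_link_status(struct pci_dev *dev,
                                                   u16 *lnksta)
{
        return pcie_capability_read_word(dev, PCI_EXP_LNKSTA, lnksta);
}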

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
        int ret;

        *val = 0;
        if (pos & 3)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_dword() fails; it
                 * may have been set to 0xFFFFFFFF if a hardware error
                 * occurred during the read.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
        if (pos & 1)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
        if (pos & 3)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
                                       u16 clear, u16 set)
{
        int ret;
        u16 val;

        ret = pcie_capability_read_word(dev, pos, &val);
        if (!ret) {
                val &= ~clear;
                val |= set;
                ret = pcie_capability_write_word(dev, pos, val);
        }

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
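
/*
 * Illustrative sketch (editor-added): the read-modify-write helper
 * above is the usual way to update a field in a PCI Express Capability
 * register, e.g. replacing the ASPM Control field in Link Control.
 * Whether enabling L1 is appropriate is of course driver/platform
 * specific; this only shows the calling convention.
 */
static int __maybe_unused example_enable_aspm_l1(struct pci_dev *dev)
{
        return pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                                                  PCI_EXP_LNKCTL_ASPMC,
                                                  PCI_EXP_LNKCTL_ASPM_L1);
}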

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
                                        u32 clear, u32 set)
{
        int ret;
        u32 val;

        ret = pcie_capability_read_dword(dev, pos, &val);
        if (!ret) {
                val &= ~clear;
                val |= set;
                ret = pcie_capability_write_dword(dev, pos, val);
        }

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_byte);

int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_word);

int pci_read_config_dword(const struct pci_dev *dev, int where,
                          u32 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_dword);
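
/*
 * Illustrative sketch (editor-added): the pci_read_config_*() wrappers
 * above are the normal driver-facing accessors.  On a disconnected
 * device they fill *val with all ones, which is also what reads from a
 * surprise-removed device typically return, so drivers often treat an
 * all-ones Vendor ID as "device gone".
 */
static bool __maybe_unused example_device_present(struct pci_dev *dev)
{
        u16 vendor;

        pci_read_config_word(dev, PCI_VENDOR_ID, &vendor);
        return vendor != 0xffff;
}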

int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);

int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);

int pci_write_config_dword(const struct pci_dev *dev, int where,
                           u32 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);