// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"

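/*
 * Devices on the cxl virtual PHB are enumerated by reading their (virtual)
 * config space, so always use normal probing.
 */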
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}

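/* MSIs are not supported on the vPHB; refuse any attempt to set them up. */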
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}

static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}

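/*
 * Enable hook for devices on the vPHB: check that the adapter link is up,
 * set the DMA offset, and allocate a default cxl context for the device
 * before enabling the AFU.
 */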
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	dev->dev.archdata.dma_offset = PAGE_OFFSET;

	/*
	 * Allocate a context to do cxl things with. If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (IS_ERR(ctx))
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_ops->afu_check_and_enable(afu) == 0);
}

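/*
 * Release the default context allocated by cxl_pci_enable_device_hook(),
 * unless it has already been started.
 */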
static void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_get_context(dev);

	if (ctx) {
		if (ctx->status == STARTED) {
			dev_err(&dev->dev, "Default context started\n");
			return;
		}
		dev->dev.archdata.cxl_ctx = NULL;
		cxl_release_context(ctx);
	}
}

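/* No particular alignment is required for bridge windows on the vPHB. */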
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}

static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here? */
}

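/*
 * AFU configuration records are exposed as functions on the vPHB; map a
 * bus/devfn pair to the corresponding configuration record number.
 */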
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
	return (bus << 8) + devfn;
}

static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
{
	struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;

	return phb ? phb->private_data : NULL;
}

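/*
 * configured_state acts as a reader count on the AFU: _get() takes a
 * reference as long as the count is non-negative, _put() drops it. Once the
 * AFU is deconfigured (configured_state goes negative), _get() fails and
 * config accesses are refused.
 */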
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}

static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}

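/*
 * Translate a bus/devfn pair into a configuration record index and check
 * that the AFU actually has such a record.
 */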
static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				       struct cxl_afu *afu, int *_record)
{
	int record;

	record = cxl_pcie_cfg_record(bus->number, devfn);
	if (record > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;

	*_record = record;
	return 0;
}

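/*
 * Config space accesses on the vPHB are serviced from the AFU's
 * configuration records rather than from real PCI config space.
 */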
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	int rc, record;
	struct cxl_afu *afu;
	u8 val8;
	u16 val16;
	u32 val32;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
		*val = val8;
		break;
	case 2:
		rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
		*val = val16;
		break;
	case 4:
		rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
		*val = val32;
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}

static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	int rc, record;
	struct cxl_afu *afu;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
		break;
	case 2:
		rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
		break;
	case 4:
		rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_SET_FAILED : 0;
}

static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};

static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
};

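/*
 * Create a virtual PHB for the AFU so that its configuration records appear
 * as PCI functions and ordinary PCI drivers can bind to them.
 */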
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
	struct pci_controller *phb;
	struct device_node *vphb_dn;
	struct device *parent;

	/*
	 * If there are no AFU configuration records we won't have anything to
	 * expose under the vPHB, so skip creating one, returning success since
	 * this is still a valid case. This will also opt us out of EEH
	 * handling since we won't have anything special to do if there are no
	 * kernel drivers attached to the vPHB, and EEH handling is not yet
	 * supported in the peer model.
	 */
	if (!afu->crs_num)
		return 0;

	/*
	 * The parent device is the adapter. Reuse the device node of
	 * the adapter.
	 * We don't seem to care what device node is used for the vPHB,
	 * but tools such as lsvpd walk up the device parents looking
	 * for a valid location code, so we might as well show devices
	 * attached to the adapter as being located on that adapter.
	 */
	parent = afu->adapter->dev.parent;
	vphb_dn = parent->of_node;

	/* Alloc and setup PHB data structure */
	phb = pcibios_alloc_controller(vphb_dn);
	if (!phb)
		return -ENODEV;

	/* Setup parent in sysfs */
	phb->parent = parent;

	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	phb->cfg_addr = NULL;
	phb->cfg_data = NULL;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;

	/* Scan the bus */
	pcibios_scan_phb(phb);
	if (phb->bus == NULL)
		return -ENXIO;

	/* Set release hook on root bus */
	pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
				    pcibios_free_controller_deferred,
				    (void *) phb);

	/*
	 * Claim resources. This might need some rework as well, depending on
	 * whether we are doing probe-only or not, like assigning unassigned
	 * resources etc...
	 */
	pcibios_claim_one_bus(phb->bus);

	/* Add probed PCI devices to the device model */
	pci_bus_add_devices(phb->bus);

	afu->phb = phb;

	return 0;
}

void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	/*
	 * We don't free phb here - that's handled by
	 * pcibios_free_controller_deferred()
	 */
}

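/* A device sits on a cxl vPHB iff its host bridge uses our config ops. */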
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (phb->ops == &cxl_pcie_pci_ops);
}

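/* Kernel API: return the AFU backing a device on a cxl vPHB. */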
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);

unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);