// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MPC83xx/85xx/86xx PCI/PCIe support routines.
 *
 * Copyright 2007-2012 Freescale Semiconductor, Inc.
 * Copyright 2008-2009 MontaVista Software, Inc.
 *
 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
 * Recode: ZHANG WEI <wei.zhang@freescale.com>
 *	   Rewrite the routines for Freescale PCI and PCI Express
 *	   Roy Zang <tie-fei.zang@freescale.com>
 *	   MPC83xx PCI-Express support:
 *	      Tony Li <tony.li@freescale.com>
 *	      Anton Vorontsov <avorontsov@ru.mvista.com>
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/fsl/edac.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
#include <asm/mpc85xx.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/swiotlb.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

static int fsl_pcie_bus_fixup, is_mpc83xx_pci;

static void quirk_fsl_pcie_early(struct pci_dev *dev)
{
	u8 hdr_type;

	/* if we aren't a PCIe don't bother */
	if (!pci_is_pcie(dev))
		return;

	/* if we aren't in host mode don't bother */
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
		return;

	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
	fsl_pcie_bus_fixup = 1;
	return;
}

static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
				    int, int, u32 *);

static int fsl_pcie_check_link(struct pci_controller *hose)
{
	u32 val = 0;

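	/*
	 * Older controllers (pre-3.0 PCIe IP) expose the LTSSM state through
	 * the PCIE_LTSSM config register; newer ones report it in the
	 * memory-mapped CSR0 register. Returns 1 when the link is not up
	 * (not in L0), 0 otherwise.
	 */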
	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
		if (hose->ops->read == fsl_indirect_read_config)
			__indirect_read_config(hose, hose->first_busno, 0,
					       PCIE_LTSSM, 4, &val);
		else
			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
		if (val < PCIE_LTSSM_L0)
			return 1;
	} else {
		struct ccsr_pci __iomem *pci = hose->private_data;
		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
				>> PEX_CSR0_LTSSM_SHIFT;
		if (val != PEX_CSR0_LTSSM_L0)
			return 1;
	}

	return 0;
}

static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (fsl_pcie_check_link(hose))
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	else
		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return indirect_read_config(bus, devfn, offset, len, val);
}

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)

static struct pci_ops fsl_indirect_pcie_ops =
{
	.read = fsl_indirect_read_config,
	.write = indirect_write_config,
};

static u64 pci64_dma_offset;

#ifdef CONFIG_SWIOTLB
static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);

	pdev->dev.bus_dma_limit =
		hose->dma_window_base_cur + hose->dma_window_size - 1;
}

static void setup_swiotlb_ops(struct pci_controller *hose)
{
	if (ppc_swiotlb_enable)
		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
}
#else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
#endif

static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
	/*
	 * Fix up PCI devices that are able to DMA to the large inbound
	 * mapping that allows addressing any RAM address from across PCI.
	 */
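	/*
	 * A mask of at least (2 * pci64_dma_offset - 1) means the device can
	 * reach the high inbound window programmed at pci64_dma_offset, so
	 * drop the bus_dma_limit and shift its DMA addresses up by that
	 * offset instead of bouncing through SWIOTLB.
	 */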
	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
		dev->bus_dma_limit = 0;
		dev->archdata.dma_offset = pci64_dma_offset;
	}
}

static int setup_one_atmu(struct ccsr_pci __iomem *pci,
			  unsigned int index, const struct resource *res,
			  resource_size_t offset)
{
	resource_size_t pci_addr = res->start - offset;
	resource_size_t phys_addr = res->start;
	resource_size_t size = resource_size(res);
	u32 flags = 0x80044000; /* enable & mem R/W */
	unsigned int i;

	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
		 (u64)res->start, (u64)size);

	if (res->flags & IORESOURCE_PREFETCH)
		flags |= 0x10000000; /* enable relaxed ordering */

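	/*
	 * Each outbound window must be a naturally aligned power-of-two in
	 * size, so carve the resource into chunks of 2^bits bytes, where
	 * bits is limited by both the remaining size and the alignment of
	 * the current addresses. For example (with a zero PCI offset), a
	 * 96 MiB region at 0xc0000000 becomes a 64 MiB window followed by
	 * a 32 MiB window.
	 */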
	for (i = 0; size > 0; i++) {
		unsigned int bits = min_t(u32, ilog2(size),
					  __ffs(pci_addr | phys_addr));

		if (index + i >= 5)
			return -1;

		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));

		pci_addr += (resource_size_t)1U << bits;
		phys_addr += (resource_size_t)1U << bits;
		size -= (resource_size_t)1U << bits;
	}

	return i;
}

static bool is_kdump(void)
{
	struct device_node *node;

	node = of_find_node_by_type(NULL, "memory");
	if (!node) {
		WARN_ON_ONCE(1);
		return false;
	}

	return of_property_read_bool(node, "linux,usable-memory");
}

/* ATMU setup for FSL PCI/PCIe controller */
static void setup_pci_atmu(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
	u64 mem, sz, paddr_hi = 0;
	u64 offset = 0, paddr_lo = ULLONG_MAX;
	u32 pcicsrbar = 0, pcicsrbar_sz;
	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
		    PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
	const u64 *reg;
	int len;
	bool setup_inbound;

	/*
	 * If this is kdump, we don't want to trigger a bunch of PCI
	 * errors by closing the window on in-flight DMA.
	 *
	 * We still run most of the function's logic so that things like
	 * hose->dma_window_size still get set.
	 */
	setup_inbound = !is_kdump();

	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
		/*
		 * BSC9132 Rev 1.0 has an issue where all the PEX inbound
		 * windows implement the default target value of 0xf for
		 * CCSR space. In all Freescale legacy devices the target
		 * of 0xf is reserved for local memory space, but BSC9132
		 * Rev 1.0 maps local memory space to target 0x0 instead of
		 * 0xf. Hence this workaround removes the target 0xf
		 * (defined for memory space) from the inbound window
		 * attributes.
		 */
		piwar &= ~PIWAR_TGI_LOCAL;
	}

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
			win_idx = 2;
			start_idx = 0;
			end_idx = 3;
		}
	}

	/* Disable all windows (except powar0 since it's ignored) */
	for (i = 1; i < 5; i++)
		out_be32(&pci->pow[i].powar, 0);

	if (setup_inbound) {
		for (i = start_idx; i < end_idx; i++)
			out_be32(&pci->piw[i].piwar, 0);
	}

	/* Setup outbound MEM window */
	for (i = 0, j = 1; i < 3; i++) {
		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
			continue;

		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);

		/* We assume all memory resources have the same offset */
		offset = hose->mem_offset[i];
		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);

		if (n < 0 || j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
		} else
			j += n;
	}

	/* Setup outbound IO window */
	if (hose->io_resource.flags & IORESOURCE_IO) {
		if (j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
		} else {
			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
				 "phy base 0x%016llx.\n",
				 (u64)hose->io_resource.start,
				 (u64)resource_size(&hose->io_resource),
				 (u64)hose->io_base_phys);
			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
			out_be32(&pci->pow[j].potear, 0);
			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
			/* Enable, IO R/W */
			out_be32(&pci->pow[j].powar, 0x80088000
				 | (ilog2(hose->io_resource.end
					  - hose->io_resource.start + 1) - 1));
		}
	}

	/* convert to pci address space */
	paddr_hi -= offset;
	paddr_lo -= offset;

	if (paddr_hi == paddr_lo) {
		pr_err("%pOF: No outbound window space\n", hose->dn);
		return;
	}

	if (paddr_lo == 0) {
		pr_err("%pOF: No space for inbound window\n", hose->dn);
		return;
	}

	/* setup PCSRBAR/PEXCSRBAR */
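	/*
	 * Standard BAR size probe: writing all ones and reading back gives a
	 * mask of the writable address bits; negating that mask and adding
	 * one yields the BAR size (e.g. a read-back of 0xfff00000 means a
	 * 1 MiB PCICSRBAR). The BAR is then placed either just below 4 GiB
	 * or just below the lowest outbound window, keeping it clear of the
	 * outbound ranges.
	 */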
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
	pcicsrbar_sz = ~pcicsrbar_sz + 1;

	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
	    (paddr_lo > 0x100000000ull))
		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
	else
		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);

	paddr_lo = min(paddr_lo, (u64)pcicsrbar);

	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);

	/* Setup inbound mem window */
	mem = memblock_end_of_DRAM();
	pr_info("%s: end of DRAM %llx\n", __func__, mem);

	/*
	 * The msi-address-64 property, if it exists, indicates the physical
	 * address of the MSIIR register.  Normally, this register is located
	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
	 * this property exists, then we normally need to create a new ATMU
	 * for it.  For now, however, we cheat.  The only entity that creates
	 * this property is the Freescale hypervisor, and the address is
	 * specified in the partition configuration.  Typically, the address
	 * is located in the page immediately after the end of DDR.  If so, we
	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
	 * page.
	 */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64))) {
		u64 address = be64_to_cpup(reg);

		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
			pr_info("%pOF: extending DDR ATMU to cover MSIIR\n",
				hose->dn);
			mem += PAGE_SIZE;
		} else {
			/* TODO: Create a new ATMU for MSIIR */
			pr_warn("%pOF: msi-address-64 address of %llx is "
				"unsupported\n", hose->dn, address);
		}
	}

	sz = min(mem, paddr_lo);
	mem_log = ilog2(sz);

	/* PCIe can overmap inbound & outbound since RX & TX are separated */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* Size window to exact size if power-of-two or one size up */
		if ((1ull << mem_log) != mem) {
			mem_log++;
			if ((1ull << mem_log) > mem)
				pr_info("%pOF: Setting PCI inbound window "
					"greater than memory size\n", hose->dn);
		}

		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwar, piwar);
		}

		win_idx--;
		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)sz;

		/*
		 * If we have more than 4G of memory, set up a second PCI
		 * inbound window so that devices capable of 64-bit
		 * addressing can work without SWIOTLB and access the full
		 * range of memory.
		 */
		if (sz != mem) {
			mem_log = ilog2(mem);

			/* Size window up if we don't fit in exact power-of-2 */
			if ((1ull << mem_log) != mem)
				mem_log++;

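			/*
			 * The 64-bit window is placed at PCI address
			 * 1ULL << mem_log (the next power of two at or above
			 * the RAM size), so a device given dma_offset sees
			 * bus address = physical address + pci64_dma_offset,
			 * while the low window keeps the identity mapping
			 * for 32-bit masks.
			 */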
			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
			pci64_dma_offset = 1ULL << mem_log;

			if (setup_inbound) {
				/* Setup inbound memory window */
				out_be32(&pci->piw[win_idx].pitar, 0x00000000);
				out_be32(&pci->piw[win_idx].piwbear,
					 pci64_dma_offset >> 44);
				out_be32(&pci->piw[win_idx].piwbar,
					 pci64_dma_offset >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			/*
			 * install our own dma_set_mask handler to fixup dma_ops
			 * and dma_offset
			 */
			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;

			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
		}
	} else {
		u64 paddr = 0;

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwar,
				 (piwar | (mem_log - 1)));
		}

		win_idx--;
		paddr += 1ull << mem_log;
		sz -= 1ull << mem_log;

		if (sz) {
			mem_log = ilog2(sz);
			piwar |= (mem_log - 1);

			if (setup_inbound) {
				out_be32(&pci->piw[win_idx].pitar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwbar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			win_idx--;
			paddr += 1ull << mem_log;
		}

		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)paddr;
	}

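	/*
	 * If the inbound windows end up smaller than RAM, devices limited
	 * to the window must bounce-buffer; enabling SWIOTLB below (together
	 * with the bus_dma_limit set in pci_dma_dev_setup_swiotlb) is what
	 * lets streaming DMA above the window fall back to bouncing.
	 */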
	if (hose->dma_window_size < mem) {
#ifdef CONFIG_SWIOTLB
		ppc_swiotlb_enable = 1;
#else
		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
		       "map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
		       hose->dn);
#endif
		/* adjusting outbound windows could reclaim space in mem map */
		if (paddr_hi < 0xffffffffull)
			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
				"gaps in memory map. Adjusting the memory map "
				"could reduce unnecessary bounce buffering.\n",
				hose->dn);

		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
			(u64)hose->dma_window_size);
	}
}

static void __init setup_pci_cmd(struct pci_controller *hose)
{
	u16 cmd;
	int cap_x;

	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
		| PCI_COMMAND_IO;
	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);

	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
	if (cap_x) {
		int pci_x_cmd = cap_x + PCI_X_CMD;
		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
	} else {
		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
	}
}

void fsl_pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	int i, is_pcie = 0, no_link;

	/* The root complex bridge comes up with bogus resources,
	 * we copy the PHB ones in.
	 *
	 * With the current generic PCI code, the PHB bus no longer
	 * has bus->resource[0..4] set, so things are a bit more
	 * tricky.
	 */

	if (fsl_pcie_bus_fixup)
		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);

	if (bus->parent == hose->bus && (is_pcie || no_link)) {
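		/*
		 * Bridge window 0 mirrors the PHB I/O resource; windows 1-3
		 * mirror hose->mem_resources[0..2]. Anything else is cleared.
		 */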
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
			struct resource *res = bus->resource[i];
			struct resource *par;

			if (!res)
				continue;
			if (i == 0)
				par = &hose->io_resource;
			else if (i < 4)
				par = &hose->mem_resources[i-1];
			else
				par = NULL;

			res->start = par ? par->start : 0;
			res->end = par ? par->end : 0;
			res->flags = par ? par->flags : 0;
		}
	}
}

int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	const int *bus_range;
	u8 hdr_type, progif;
	struct device_node *dev;
	struct ccsr_pci __iomem *pci;
	u16 temp;
	u32 svr = mfspr(SPRN_SVR);

	dev = pdev->dev.of_node;

	if (!of_device_is_available(dev)) {
		pr_warn("%pOF: disabled\n", dev);
		return -ENODEV;
	}

	pr_debug("Adding PCI host bridge %pOF\n", dev);

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc)) {
		printk(KERN_WARNING "Can't get pci register base!\n");
		return -ENOMEM;
	}

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int))
		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
		       " bus 0\n", dev);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);
	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	/* set platform device as the parent */
	hose->parent = &pdev->dev;
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
		 (u64)rsrc.start, (u64)resource_size(&rsrc));

	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
	if (!hose->private_data)
		goto no_bridge;

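	/*
	 * The indirect config mechanism uses two CCSR registers: the config
	 * address register at the start of the block and the config data
	 * register 4 bytes after it, both accessed big-endian.
	 */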
	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
			   PPC_INDIRECT_TYPE_BIG_ENDIAN);

	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* use fsl_indirect_read_config for PCIe */
		hose->ops = &fsl_indirect_pcie_ops;
		/* For PCIE read HEADER_TYPE to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
			goto no_bridge;

	} else {
		/* For PCI read PROG to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
		if ((progif & 1) &&
		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
			goto no_bridge;
	}

	setup_pci_cmd(hose);

	/* check PCI express link status */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
		if (fsl_pcie_check_link(hose))
			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	} else {
		/*
		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
		 * disable the combining of crossing cacheline
		 * boundary requests into one burst transaction.
		 * PCI-X operation is not affected.
		 * Fix erratum PCI 5 on MPC8548
		 */
#define PCI_BUS_FUNCTION 0x44
#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
		if (((SVR_SOC_VER(svr) == SVR_8543) ||
		     (SVR_SOC_VER(svr) == SVR_8545) ||
		     (SVR_SOC_VER(svr) == SVR_8547) ||
		     (SVR_SOC_VER(svr) == SVR_8548)) &&
		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
			early_read_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, &temp);
			temp |= PCI_BUS_FUNCTION_MDS;
			early_write_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, temp);
		}
	}

	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
	       "Firmware bus number: %d->%d\n",
	       (unsigned long long)rsrc.start, hose->first_busno,
	       hose->last_busno);

	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
		 hose, hose->cfg_addr, hose->cfg_data);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, is_primary);

	/* Setup PEX window registers */
	setup_pci_atmu(hose);

	/* Set up controller operations */
	setup_swiotlb_ops(hose);

	return 0;

no_bridge:
	iounmap(hose->private_data);
	/* unmap cfg_data & cfg_addr separately if not on same page */
	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
		iounmap(hose->cfg_data);
	iounmap(hose->cfg_addr);
	pcibios_free_controller(hose);
	return -ENODEV;
}
#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */

DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
			quirk_fsl_pcie_early);

#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
struct mpc83xx_pcie_priv {
	void __iomem *cfg_type0;
	void __iomem *cfg_type1;
	u32 dev_base;
};

struct pex_inbound_window {
	u32 ar;
	u32 tar;
	u32 barl;
	u32 barh;
};

/*
 * By U-Boot convention, PCIe outbound window 0 is used for outbound
 * configuration transactions.
 */
#define PEX_OUTWIN0_BAR		0xCA4
#define PEX_OUTWIN0_TAL		0xCA8
#define PEX_OUTWIN0_TAH		0xCAC
#define PEX_RC_INWIN_BASE	0xE60
#define PEX_RCIWARn_EN		0x1

static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Workaround for the HW bug: for Type 0 configure transactions the
	 * PCI-E controller does not check the device number bits and just
	 * assumes that the device number bits are 0.
	 */
	if (bus->number == hose->first_busno ||
	    bus->primary == hose->first_busno) {
		if (devfn & 0xf8)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (ppc_md.pci_exclude_device) {
		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	return PCIBIOS_SUCCESSFUL;
}

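/*
 * Map a config access to a CPU address: accesses on the root bus go
 * straight to the type 0 space, while accesses to downstream buses go
 * through outbound window 0, which is retargeted by writing the
 * bus/devfn-encoded address to PEX_OUTWIN0_TAL (cached in dev_base so
 * back-to-back accesses to the same device skip the rewrite).
 */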
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) unsigned int devfn, int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) struct mpc83xx_pcie_priv *pcie = hose->dn->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) u32 dev_base = bus->number << 24 | devfn << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) ret = mpc83xx_pcie_exclude_device(bus, devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) offset &= 0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) /* Type 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (bus->number == hose->first_busno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return pcie->cfg_type0 + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (pcie->dev_base == dev_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) goto mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) pcie->dev_base = dev_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) mapped:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return pcie->cfg_type1 + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) int offset, int len, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) val &= 0xffffff00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return pci_generic_config_write(bus, devfn, offset, len, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) static struct pci_ops mpc83xx_pcie_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) .map_bus = mpc83xx_pcie_remap_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) .read = pci_generic_config_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) .write = mpc83xx_pcie_write_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct resource *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct mpc83xx_pcie_priv *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) u32 cfg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (!pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (!pcie->cfg_type0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (!cfg_bar) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* PCI-E isn't configured. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (!pcie->cfg_type1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) WARN_ON(hose->dn->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) hose->dn->data = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) hose->ops = &mpc83xx_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (fsl_pcie_check_link(hose))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) iounmap(pcie->cfg_type0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) kfree(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) int __init mpc83xx_add_bridge(struct device_node *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct pci_controller *hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct resource rsrc_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct resource rsrc_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) const int *bus_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int primary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) is_mpc83xx_pci = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (!of_device_is_available(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) pr_warn("%pOF: disabled by the firmware.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) pr_debug("Adding PCI host bridge %pOF\n", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Fetch host bridge registers address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (of_address_to_resource(dev, 0, &rsrc_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) printk(KERN_WARNING "Can't get pci register base!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) "No PCI config register base in device tree, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) "using default\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * MPC83xx supports up to two host controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * one at 0x8500 has config space registers at 0x8300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * one at 0x8600 has config space registers at 0x8380
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if ((rsrc_reg.start & 0xfffff) == 0x8500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) else if ((rsrc_reg.start & 0xfffff) == 0x8600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * Controller at offset 0x8500 is primary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if ((rsrc_reg.start & 0xfffff) == 0x8500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) primary = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) primary = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* Get bus range if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) bus_range = of_get_property(dev, "bus-range", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (bus_range == NULL || len < 2 * sizeof(int)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) printk(KERN_WARNING "Can't get bus-range for %pOF, assuming"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) " bus 0\n", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) pci_add_flags(PCI_REASSIGN_ALL_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) hose = pcibios_alloc_controller(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (!hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) hose->first_busno = bus_range ? bus_range[0] : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) hose->last_busno = bus_range ? bus_range[1] : 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) goto err0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) setup_indirect_pci(hose, rsrc_cfg.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rsrc_cfg.start + 4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) "Firmware bus number: %d->%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) (unsigned long long)rsrc_reg.start, hose->first_busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) hose->last_busno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p, cfg_data=0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) hose, hose->cfg_addr, hose->cfg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* Interpret the "ranges" property */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* This also maps the I/O region and sets isa_io/mem_base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) pci_process_bridge_OF_ranges(hose, dev, primary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) err0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) pcibios_free_controller(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) #endif /* CONFIG_PPC_83xx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) u64 fsl_pci_immrbar_base(struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) #ifdef CONFIG_PPC_83xx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (is_mpc83xx_pci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct mpc83xx_pcie_priv *pcie = hose->dn->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct pex_inbound_window *in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* Walk the Root Complex inbound windows to find the IMMR mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* not enabled, skip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (get_immrbase() == in_le32(&in[i].tar))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return (u64)in_le32(&in[i].barh) << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) in_le32(&in[i].barl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!is_mpc83xx_pci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u32 base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pci_bus_read_config_dword(hose->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * For PEXCSRBAR, bits 3-0 carry the prefetchable and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * address type attributes, so they must be masked off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * when reading the base address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
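/*
 * For example (illustrative value only): a raw PEXCSRBAR readback of
 * 0xfe00000c masks down to a base address of 0xfe000000.
 */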
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) base &= PCI_BASE_ADDRESS_MEM_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) #ifdef CONFIG_E500
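/*
 * Emulate a load instruction that took a machine check while targeting
 * PCI memory space: the destination register is filled with all-ones
 * for the access size (0xff, 0xffff or 0xffffffff, with lha/lhax
 * sign-extending to ~0UL), matching what an aborted PCI read returns;
 * update-form loads still update RA. A non-zero return tells the
 * caller the instruction was handled.
 */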
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) unsigned int rd, ra, rb, d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rd = get_rt(inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ra = get_ra(inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) rb = get_rb(inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) d = get_d(inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) switch (get_op(inst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) case 31:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) switch (get_xop(inst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) case OP_31_XOP_LWZX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) case OP_31_XOP_LWBRX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) regs->gpr[rd] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) case OP_31_XOP_LWZUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) regs->gpr[rd] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) regs->gpr[ra] += regs->gpr[rb];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) case OP_31_XOP_LBZX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) regs->gpr[rd] = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) case OP_31_XOP_LBZUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) regs->gpr[rd] = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) regs->gpr[ra] += regs->gpr[rb];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) case OP_31_XOP_LHZX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) case OP_31_XOP_LHBRX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) regs->gpr[rd] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) case OP_31_XOP_LHZUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) regs->gpr[rd] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) regs->gpr[ra] += regs->gpr[rb];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case OP_31_XOP_LHAX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) regs->gpr[rd] = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case OP_31_XOP_LHAUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) regs->gpr[rd] = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) regs->gpr[ra] += regs->gpr[rb];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) case OP_LWZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) regs->gpr[rd] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) case OP_LWZU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) regs->gpr[rd] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) regs->gpr[ra] += (s16)d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) case OP_LBZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) regs->gpr[rd] = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) case OP_LBZU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) regs->gpr[rd] = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) regs->gpr[ra] += (s16)d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case OP_LHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) regs->gpr[rd] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) case OP_LHZU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) regs->gpr[rd] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) regs->gpr[ra] += (s16)d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) case OP_LHA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) regs->gpr[rd] = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) case OP_LHAU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) regs->gpr[rd] = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) regs->gpr[ra] += (s16)d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
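/*
 * Return 1 if @addr falls inside one of the PCI memory windows of a
 * Freescale controller (only hoses flagged PPC_INDIRECT_TYPE_EXT_REG
 * are considered).
 */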
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static int is_in_pci_mem_space(phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct pci_controller *hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) list_for_each_entry(hose, &hose_list, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) res = &hose->mem_resources[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if ((res->flags & IORESOURCE_MEM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) addr >= res->start && addr <= res->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
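/*
 * Machine check handler for loads that fault on PCI (e.g. aborted reads
 * from an absent device): read the faulting physical address from MCAR
 * (plus MCARU on 36-bit physical parts), and if it lies in PCI memory
 * space, fetch the offending instruction, emulate it as a load of
 * all-ones and skip past it. Returns 1 if handled here, 0 to let the
 * normal machine check path run.
 */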
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int fsl_pci_mcheck_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) u32 inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) phys_addr_t addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* Let KVM/QEMU deal with the exception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (regs->msr & MSR_GS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) #ifdef CONFIG_PHYS_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) addr = mfspr(SPRN_MCARU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) addr <<= 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) addr += mfspr(SPRN_MCAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (is_in_pci_mem_space(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ret = copy_from_user_nofault(&inst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) (void __user *)regs->nip, sizeof(inst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ret = get_kernel_nofault(inst, (void *)regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (!ret && mcheck_handle_load(regs, inst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) regs->nip += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static const struct of_device_id pci_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) { .compatible = "fsl,mpc8540-pci", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) { .compatible = "fsl,mpc8548-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) { .compatible = "fsl,mpc8610-pci", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) { .compatible = "fsl,mpc8641-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) { .compatible = "fsl,qoriq-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) { .compatible = "fsl,qoriq-pcie-v2.1", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) { .compatible = "fsl,qoriq-pcie-v2.2", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) { .compatible = "fsl,qoriq-pcie-v2.3", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) { .compatible = "fsl,qoriq-pcie-v2.4", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) { .compatible = "fsl,qoriq-pcie-v3.0", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * The following entries are for compatibility with older device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * trees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) { .compatible = "fsl,p1022-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) { .compatible = "fsl,p4080-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
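/* OF node of the host bridge that should own the primary PCI bus. */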
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct device_node *fsl_pci_primary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) void fsl_pci_assign_primary(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Callers can specify the primary bus using other means. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (fsl_pci_primary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* If a PCI host bridge contains an ISA node, it's primary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) np = of_find_node_by_type(NULL, "isa");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) while ((fsl_pci_primary = of_get_parent(np))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) np = fsl_pci_primary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (of_match_node(pci_ids, np) && of_device_is_available(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * If there's no PCI host bridge with ISA, arbitrarily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * designate one as primary. This can go away once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * various bugs with primary-less systems are fixed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) for_each_matching_node(np, pci_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (of_device_is_available(np)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) fsl_pci_primary = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct pci_controller *hose = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct ccsr_pci __iomem *pci = hose->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) u32 dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dr = in_be32(&pci->pex_pme_mes_dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) out_be32(&pci->pex_pme_mes_dr, dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
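/*
 * Hook up the controller's PME interrupt: PME generation on the root
 * port is disabled while the handler is installed, the PTOD/ENL23D/
 * EXL23D events are unmasked in the PME message interrupt enable
 * register, and PME is then re-enabled.
 */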
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static int fsl_pci_pme_probe(struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct ccsr_pci __iomem *pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) int pme_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) u16 pms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* Get hose's pci_dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* PME Disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) pms &= ~PCI_PM_CTRL_PME_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) pme_irq = irq_of_parse_and_map(hose->dn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (!pme_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) dev_err(&dev->dev, "Failed to map PME interrupt.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) res = devm_request_irq(hose->parent, pme_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) fsl_pci_pme_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) "[PCI] PME", hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (res < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) irq_dispose_mapping(pme_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) pci = hose->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* Enable PTOD, ENL23D & EXL23D */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) clrbits32(&pci->pex_pme_mes_disr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) out_be32(&pci->pex_pme_mes_ier, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) setbits32(&pci->pex_pme_mes_ier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* PME Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) pms |= PCI_PM_CTRL_PME_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
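/*
 * Broadcast PME_Turn_Off from the root complex and poll (1 ms steps,
 * up to ~150 ms) for the resulting PME event before suspend proceeds.
 */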
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static void send_pme_turnoff_message(struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct ccsr_pci __iomem *pci = hose->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) u32 dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /* Send PME_Turn_Off Message Request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* Wait for the turn-off handshake to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) for (i = 0; i < 150; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) dr = in_be32(&pci->pex_pme_mes_dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (dr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) out_be32(&pci->pex_pme_mes_dr, dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) send_pme_turnoff_message(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int fsl_pci_syscore_suspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct pci_controller *hose, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) fsl_pci_syscore_do_suspend(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
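/*
 * On resume, request an exit from the L2 link state, wait for the
 * corresponding event (same 150 ms poll as above), then reprogram the
 * ATMU windows in case they did not survive the sleep state.
 */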
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct ccsr_pci __iomem *pci = hose->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) u32 dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* Send Exit L2 State Message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* Wait for the L2 exit to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) for (i = 0; i < 150; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) dr = in_be32(&pci->pex_pme_mes_dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (dr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) out_be32(&pci->pex_pme_mes_dr, dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) setup_pci_atmu(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void fsl_pci_syscore_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct pci_controller *hose, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) fsl_pci_syscore_do_resume(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static struct syscore_ops pci_syscore_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) .suspend = fsl_pci_syscore_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .resume = fsl_pci_syscore_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) void fsl_pcibios_fixup_phb(struct pci_controller *phb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) fsl_pci_pme_probe(phb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
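/*
 * Register a child "mpc85xx-pci-edac" platform device that shares this
 * controller's resources and OF node so the EDAC driver can handle PCI
 * error interrupts.
 */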
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static int add_err_dev(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct platform_device *errdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct mpc85xx_edac_pci_plat_data pd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) .of_node = pdev->dev.of_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) errdev = platform_device_register_resndata(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) "mpc85xx-pci-edac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) PLATFORM_DEVID_AUTO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) pdev->resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) pdev->num_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) &pd, sizeof(pd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return PTR_ERR_OR_ZERO(errdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static int fsl_pci_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) ret = add_err_dev(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) dev_err(&pdev->dev, "couldn't register error device: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static struct platform_driver fsl_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .name = "fsl-pci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .of_match_table = pci_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) .probe = fsl_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static int __init fsl_pci_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) register_syscore_ops(&pci_syscore_pm_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return platform_driver_register(&fsl_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) arch_initcall(fsl_pci_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) #endif