// SPDX-License-Identifier: GPL-2.0
/*
 * ACPI PCIe host controller driver for Rockchip SoCs
 *
 * Copyright (C) 2022 Rockchip Electronics Co., Ltd.
 *		      http://www.rock-chips.com
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/pci-ecam.h>
#include <linux/pci-acpi.h>
#include <linux/pci.h>

#include "pcie-designware.h"
#include "../../pci.h"

#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)

#define DWC_ATU_REGION_INDEX1	(0x1 << 0)
#define ECAM_RESV_SIZE		SZ_16M

struct rk_pcie_acpi {
	void __iomem *dbi_base;
	void __iomem *cfg_base;
	phys_addr_t mcfg_addr;
};

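/*
 * Helpers for accessing the outbound iATU "unroll" registers through the
 * DBI space: each outbound region has its own register block located at
 * DEFAULT_DBI_ATU_OFFSET plus a per-index offset.
 */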
static void rk_pcie_writel_ob_unroll(void __iomem *dbi_base, u32 index, u32 reg, u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	writel(val, dbi_base + offset + reg + DEFAULT_DBI_ATU_OFFSET);
}

static u32 rk_pcie_readl_ob_unroll(void __iomem *dbi_base, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return readl(dbi_base + offset + reg + DEFAULT_DBI_ATU_OFFSET);
}

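/*
 * Program one outbound iATU region: map a CPU address window of @size bytes
 * starting at @cpu_addr to the PCI address @pci_addr with the given region
 * @type, then poll until the enable bit reads back as set.
 */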
static void rk_pcie_prog_outbound_atu_unroll(struct device *dev, void __iomem *dbi_base, u32 index,
					     u32 type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	dev_dbg(dev, "%s: ATU programmed with: index: %d, type: %d, cpu addr: %8llx, pci addr: %8llx, size: %8x\n",
		__func__, index, type, cpu_addr, pci_addr, size);

	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_LOWER_BASE, lower_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_UPPER_BASE, upper_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_LOWER_LIMIT, lower_32_bits(cpu_addr + size - 1));
	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_UPPER_LIMIT, upper_32_bits(cpu_addr + size - 1));
	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_LOWER_TARGET, lower_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_UPPER_TARGET, upper_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	rk_pcie_writel_ob_unroll(dbi_base, index, PCIE_ATU_UNR_REGION_CTRL2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ob_unroll(dbi_base, index, PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;
		mdelay(LINK_WAIT_IATU);
	}

	dev_err(dev, "outbound iATU is not being enabled\n");
}

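/*
 * Look up the RC resources for this segment from ACPI, map the DBI and the
 * ECAM (MCFG) config space, and stash the mappings in cfg->priv for use by
 * the config accessors below.
 */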
static int rk_pcie_ecam_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct acpi_device *adev = to_acpi_device(dev);
	struct acpi_pci_root *root = acpi_driver_data(adev);
	struct resource *res;
	phys_addr_t mcfg_addr;
	struct rk_pcie_acpi *rk_pcie;
	int ret;

	rk_pcie = devm_kzalloc(dev, sizeof(*rk_pcie), GFP_KERNEL);
	if (!rk_pcie)
		return -ENOMEM;

	/*
	 * Retrieve RC base and size from a RKCP0001 device with _UID
	 * matching our segment.
	 */
	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	ret = acpi_get_rc_resources(dev, "RKCP0001", root->segment, res);
	if (ret) {
		dev_err(dev, "can't get rc base (DBI) address\n");
		return -ENOMEM;
	}

	dev_info(dev, "DBI address is %pa\n", &res->start);
	rk_pcie->dbi_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
	if (!rk_pcie->dbi_base)
		return -ENOMEM;

	mcfg_addr = acpi_pci_root_get_mcfg_addr(adev->handle);
	if (!mcfg_addr) {
		dev_err(dev, "can't get mcfg base (cfg) address\n");
		return -ENOMEM;
	}

	dev_info(dev, "mcfg address is %pa\n", &mcfg_addr);
	rk_pcie->mcfg_addr = mcfg_addr;

	rk_pcie->cfg_base = devm_pci_remap_cfgspace(dev, mcfg_addr, SZ_1M);
	if (!rk_pcie->cfg_base)
		return -ENOMEM;

	cfg->priv = rk_pcie;

	return 0;
}

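/*
 * Only one device (slot 0) sits directly below the DWC root port, so reject
 * config accesses to any other slot on the root bus.
 */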
static int rk_pcie_ecam_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val)
{
	struct pci_config_window *cfg = bus->sysdata;
	int dev = PCI_SLOT(devfn);

	/* access only one slot on each root port */
	if (bus->number == cfg->busr.start && dev > 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int rk_pcie_ecam_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val)
{
	struct pci_config_window *cfg = bus->sysdata;
	int dev = PCI_SLOT(devfn);

	/* access only one slot on each root port */
	if (bus->number == cfg->busr.start && dev > 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

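/*
 * Map a config access: the root bus goes straight to the DBI registers,
 * while accesses to downstream buses reprogram outbound iATU region 1 as a
 * CFG0/CFG1 window targeting the requested bus/device/function and go
 * through the remapped ECAM space.
 */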
static void __iomem *rk_pcie_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct rk_pcie_acpi *rk_pcie = cfg->priv;
	u32 atu_type;
	u32 busdev;

	/* access the RC's own config space directly via DBI */
	if (bus->number == cfg->busr.start)
		return rk_pcie->dbi_base + where;

	if (pci_is_root_bus(bus->parent))
		atu_type = PCIE_ATU_TYPE_CFG0;
	else
		atu_type = PCIE_ATU_TYPE_CFG1;

	busdev = PCIE_ATU_BUS(bus->number) |
		 PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/*
	 * UEFI region mapping relation:
	 * index0: 32-bit np memory
	 * index1: config
	 * index2: IO
	 * index3: 64-bit np memory
	 */
	rk_pcie_prog_outbound_atu_unroll(cfg->parent, rk_pcie->dbi_base, DWC_ATU_REGION_INDEX1,
					 atu_type, (u64)rk_pcie->mcfg_addr, busdev, ECAM_RESV_SIZE);

	dev_dbg(cfg->parent, "Read other config: 0x%p where = %d\n",
		rk_pcie->cfg_base + where, where);

	return rk_pcie->cfg_base + where;
}

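/* ECAM ops for the Rockchip DWC root complex: custom init plus the accessors above. */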
const struct pci_ecam_ops rk_pcie_ecam_ops = {
	.bus_shift	= 20, /* We don't need this */
	.init		= rk_pcie_ecam_init,
	.pci_ops	= {
		.map_bus	= rk_pcie_ecam_map_bus,
		.read		= rk_pcie_ecam_rd_conf,
		.write		= rk_pcie_ecam_wr_conf,
	}
};
#endif