// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Mobiveil PCIe Host controller
 *
 * Copyright (c) 2018 Mobiveil Inc.
 * Copyright 2019-2020 NXP
 *
 * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
 *	   Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "pcie-mobiveil.h"

static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	/* Only one device down on each root port */
	if (pci_is_root_bus(bus) && (devfn > 0))
		return false;

	/*
	 * Do not read more than one device on the bus directly
	 * attached to the RC.
	 */
	if ((bus->primary == to_pci_host_bridge(bus->bridge)->busnr) && (PCI_SLOT(devfn) > 0))
		return false;

	return true;
}

/*
 * mobiveil_pcie_map_bus - routine to get the configuration base of either
 * root port or endpoint
 */
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct mobiveil_pcie *pcie = bus->sysdata;
	struct mobiveil_root_port *rp = &pcie->rp;
	u32 value;

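	/*
	 * A NULL return makes pci_generic_config_read() report
	 * PCIBIOS_DEVICE_NOT_FOUND and hand back all-ones data.
	 */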
	if (!mobiveil_pcie_valid_device(bus, devfn))
		return NULL;

	/* RC config access */
	if (pci_is_root_bus(bus))
		return pcie->csr_axi_slave_base + where;

	/*
	 * EP config access (in Config/APIO space)
	 * Program the PEX address base (bits [31:16]) with the target
	 * BDF in the PAB_AXI_AMAP_PEX_WIN_L0 register.
	 * Relies on pci_lock serialization.
	 */
	value = bus->number << PAB_BUS_SHIFT |
		PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
		PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;

	mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));

	return rp->config_axi_slave_base + where;
}

static struct pci_ops mobiveil_pcie_ops = {
	.map_bus = mobiveil_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static void mobiveil_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
	struct device *dev = &pcie->pdev->dev;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct mobiveil_msi *msi = &rp->msi;
	u32 msi_data, msi_addr_lo, msi_addr_hi;
	u32 intr_status, msi_status;
	unsigned long shifted_status;
	u32 bit, virq, val, mask;

	/*
	 * The core provides a single interrupt line for both INTx and MSI
	 * messages, so read both the INTx and the MSI status.
	 */

	chained_irq_enter(chip, desc);

	/* read INTx status */
	val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
	mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	intr_status = val & mask;

	/*
	 * Handle INTx: the lines are level-triggered, so keep draining
	 * the status register until it reads back clear.
	 */
	if (intr_status & PAB_INTP_INTX_MASK) {
		shifted_status = mobiveil_csr_readl(pcie,
						    PAB_INTP_AMBA_MISC_STAT);
		shifted_status &= PAB_INTP_INTX_MASK;
		shifted_status >>= PAB_INTX_START;
		do {
			for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
				virq = irq_find_mapping(rp->intx_domain,
							bit + 1);
				if (virq)
					generic_handle_irq(virq);
				else
					dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
							    bit);

				/* clear interrupt handled */
				mobiveil_csr_writel(pcie,
						    1 << (PAB_INTX_START + bit),
						    PAB_INTP_AMBA_MISC_STAT);
			}

			shifted_status = mobiveil_csr_readl(pcie,
							    PAB_INTP_AMBA_MISC_STAT);
			shifted_status &= PAB_INTP_INTX_MASK;
			shifted_status >>= PAB_INTX_START;
		} while (shifted_status != 0);
	}

	/* read extra MSI status register */
	msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);

	/*
	 * Handle MSI interrupts: bit 0 of the status register is set
	 * while the MSI FIFO still holds entries.
	 */
	while (msi_status & 1) {
		msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);

		/*
		 * MSI_STATUS_OFFSET is only cleared once both the MSI data
		 * and the MSI address have been popped from the hardware
		 * FIFO, hence the two dummy address reads that follow.
		 */
		msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_L_OFFSET);
		msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_H_OFFSET);
		dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
			msi_data, msi_addr_hi, msi_addr_lo);

		virq = irq_find_mapping(msi->dev_domain, msi_data);
		if (virq)
			generic_handle_irq(virq);

		msi_status = readl_relaxed(pcie->apb_csr_base +
					   MSI_STATUS_OFFSET);
	}

	/* Clear the interrupt status */
	mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
	chained_irq_exit(chip, desc);
}

static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct platform_device *pdev = pcie->pdev;
	struct device_node *node = dev->of_node;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct resource *res;

	/* map config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "config_axi_slave");
	rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rp->config_axi_slave_base))
		return PTR_ERR(rp->config_axi_slave_base);
	rp->ob_io_res = res;

	/* map csr resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "csr_axi_slave");
	pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->csr_axi_slave_base))
		return PTR_ERR(pcie->csr_axi_slave_base);
	pcie->pcie_reg_base = res->start;
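	/*
	 * This physical base doubles as the MSI target address that
	 * mobiveil_pcie_enable_msi() programs into the controller.
	 */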

	/* read the number of windows requested */
	if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
		pcie->apio_wins = MAX_PIO_WINDOWS;

	if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
		pcie->ppio_wins = MAX_PIO_WINDOWS;

	return 0;
}

static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
	phys_addr_t msg_addr = pcie->pcie_reg_base;
	struct mobiveil_msi *msi = &pcie->rp.msi;

	msi->num_of_vectors = PCI_NUM_MSI;
	msi->msi_pages_phys = (phys_addr_t)msg_addr;

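	/*
	 * The MSI target is a 4 KiB window at the controller's own CSR
	 * base, so inbound MSI writes are captured by the PAB instead of
	 * landing in system memory.
	 */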
	writel_relaxed(lower_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
	writel_relaxed(upper_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
	writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
	writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}

int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
{
	struct mobiveil_root_port *rp = &pcie->rp;
	struct pci_host_bridge *bridge = rp->bridge;
	u32 value, pab_ctrl, type;
	struct resource_entry *win;

	pcie->ib_wins_configured = 0;
	pcie->ob_wins_configured = 0;

	if (!reinit) {
		/*
		 * Set up bus numbers: keep the latency timer byte and
		 * program primary = 0, secondary = 1, subordinate = 0xff.
		 */
		value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
		value &= 0xff000000;
		value |= 0x00ff0100;
		mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
	}

	/*
	 * Enable the I/O, memory and bus-master bits in the Command
	 * register of the PAB config space.
	 */
	value = mobiveil_csr_readl(pcie, PCI_COMMAND);
	value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	mobiveil_csr_writel(pcie, value, PCI_COMMAND);

	/*
	 * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
	 * register
	 */
	pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
	pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
	mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);

	/*
	 * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
	 * PAB_AXI_PIO_CTRL Register
	 */
	value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
	value |= APIO_EN_MASK;
	mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);

	/* Enable PCIe PIO master */
	value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
	value |= 1 << PIO_ENABLE_SHIFT;
	mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);

	/*
	 * Program one outbound window for config accesses and one default
	 * inbound window for all upstream traffic; the remaining outbound
	 * windows are configured from the "ranges" property in the device
	 * tree.
	 */

	/* config outbound translation window */
	program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0,
			   CFG_WINDOW_TYPE, resource_size(rp->ob_io_res));

	/* memory inbound translation window */
	program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		if (resource_type(win->res) == IORESOURCE_MEM)
			type = MEM_WINDOW_TYPE;
		else if (resource_type(win->res) == IORESOURCE_IO)
			type = IO_WINDOW_TYPE;
		else
			continue;

		/* configure outbound translation window */
		program_ob_windows(pcie, pcie->ob_wins_configured,
				   win->res->start,
				   win->res->start - win->offset,
				   type, resource_size(win->res));
	}

	/*
	 * Fix up the class code reported for the root port: preserve the
	 * revision ID byte and program the PCI-to-PCI bridge class.
	 */
	value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
	value &= 0xff;
	value |= (PCI_CLASS_BRIDGE_PCI << 16);
	mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);

	return 0;
}

static void mobiveil_mask_intx_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct mobiveil_pcie *pcie;
	struct mobiveil_root_port *rp;
	unsigned long flags;
	u32 mask, shifted_val;

	pcie = irq_desc_get_chip_data(desc);
	rp = &pcie->rp;
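	/* INTx hwirqs are 1-based (INTA == 1), hence the -1 below */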
	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
	raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	shifted_val &= ~mask;
	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
	raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}

static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct mobiveil_pcie *pcie;
	struct mobiveil_root_port *rp;
	unsigned long flags;
	u32 shifted_val, mask;

	pcie = irq_desc_get_chip_data(desc);
	rp = &pcie->rp;
	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
	raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	shifted_val |= mask;
	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
	raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}

static struct irq_chip intx_irq_chip = {
	.name = "mobiveil_pcie:intx",
	.irq_enable = mobiveil_unmask_intx_irq,
	.irq_disable = mobiveil_mask_intx_irq,
	.irq_mask = mobiveil_mask_intx_irq,
	.irq_unmask = mobiveil_unmask_intx_irq,
};

/* routine to set up the INTx related data */
static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

/* INTx domain operations structure */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mobiveil_pcie_intx_map,
};

static struct irq_chip mobiveil_msi_irq_chip = {
	.name = "Mobiveil PCIe MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info mobiveil_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &mobiveil_msi_irq_chip,
};

static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));

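	/*
	 * Each vector gets its own doorbell address, spaced one dword
	 * apart from the MSI base programmed in mobiveil_pcie_enable_msi().
	 */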
	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

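/*
 * All MSI vectors funnel through the controller's single chained IRQ,
 * so per-vector CPU affinity cannot be honoured.
 */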
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
				     const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip mobiveil_msi_bottom_irq_chip = {
	.name = "Mobiveil MSI",
	.irq_compose_msi_msg = mobiveil_compose_msi_msg,
	.irq_set_affinity = mobiveil_msi_set_affinity,
};

static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
					 unsigned int virq,
					 unsigned int nr_irqs, void *args)
{
	struct mobiveil_pcie *pcie = domain->host_data;
	struct mobiveil_msi *msi = &pcie->rp.msi;
	unsigned long bit;

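	/* only one vector per allocation is supported */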
	WARN_ON(nr_irqs != 1);
	mutex_lock(&msi->lock);

	bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
	if (bit >= msi->num_of_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->msi_irq_in_use);

	mutex_unlock(&msi->lock);

	irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
			    domain->host_data, handle_level_irq, NULL, NULL);
	return 0;
}

static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
					 unsigned int virq,
					 unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
	struct mobiveil_msi *msi = &pcie->rp.msi;

	mutex_lock(&msi->lock);

	if (!test_bit(d->hwirq, msi->msi_irq_in_use))
		dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, msi->msi_irq_in_use);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = mobiveil_irq_msi_domain_alloc,
	.free = mobiveil_irq_msi_domain_free,
};

static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct mobiveil_msi *msi = &pcie->rp.msi;

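	/*
	 * Two-level scheme: dev_domain hands out the hwirqs tracked by
	 * the msi_irq_in_use bitmap, and the PCI MSI domain stacks on
	 * top of it.
	 */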
	mutex_init(&msi->lock);
	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
						&msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &mobiveil_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct mobiveil_root_port *rp = &pcie->rp;

	/* set up INTx */
	rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
						&intx_domain_ops, pcie);

	if (!rp->intx_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENOMEM;
	}

	raw_spin_lock_init(&rp->intx_mask_lock);

	/* set up MSI */
	return mobiveil_allocate_msi_domains(pcie);
}

static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
{
	struct platform_device *pdev = pcie->pdev;
	struct device *dev = &pdev->dev;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct resource *res;
	int ret;

	/* map MSI config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
	pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->apb_csr_base))
		return PTR_ERR(pcie->apb_csr_base);

	/* set up MSI hardware registers */
	mobiveil_pcie_enable_msi(pcie);

	rp->irq = platform_get_irq(pdev, 0);
	if (rp->irq < 0)
		return rp->irq;

	/* initialize the IRQ domains */
	ret = mobiveil_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to create IRQ domain\n");
		return ret;
	}

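	/*
	 * Hook the chained handler: mobiveil_pcie_isr() demultiplexes the
	 * INTx and MSI events arriving on the single controller IRQ.
	 */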
	irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie);

	/* Enable interrupts */
	mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
			    PAB_INTP_AMBA_MISC_ENB);

	return 0;
}

static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
{
	struct mobiveil_root_port *rp = &pcie->rp;

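	/* platform-specific glue may override the integrated interrupt setup */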
	if (rp->ops->interrupt_init)
		return rp->ops->interrupt_init(pcie);

	return mobiveil_pcie_integrated_interrupt_init(pcie);
}

static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
{
	u32 header_type;

	header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
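	/* mask off the multi-function bit (bit 7) */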
	header_type &= 0x7f;

	return header_type == PCI_HEADER_TYPE_BRIDGE;
}

int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
{
	struct mobiveil_root_port *rp = &pcie->rp;
	struct pci_host_bridge *bridge = rp->bridge;
	struct device *dev = &pcie->pdev->dev;
	int ret;

	ret = mobiveil_pcie_parse_dt(pcie);
	if (ret) {
		dev_err(dev, "Parsing DT failed, ret: %d\n", ret);
		return ret;
	}

	if (!mobiveil_pcie_is_bridge(pcie))
		return -ENODEV;

	/*
	 * configure all inbound and outbound windows and prepare the RC for
	 * config access
	 */
	ret = mobiveil_host_init(pcie, false);
	if (ret) {
		dev_err(dev, "Failed to initialize host\n");
		return ret;
	}

	ret = mobiveil_pcie_interrupt_init(pcie);
	if (ret) {
		dev_err(dev, "Interrupt init failed\n");
		return ret;
	}

	/* Initialize bridge */
	bridge->sysdata = pcie;
	bridge->ops = &mobiveil_pcie_ops;

	ret = mobiveil_bringup_link(pcie);
	if (ret) {
		dev_info(dev, "link bring-up failed\n");
		return ret;
	}

	return pci_host_probe(bridge);
}