// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
 *
 * (C) Copyright 2019 - 2020, Xilinx, Inc.
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pci-ecam.h>

#include "../pci.h"

/* Register definitions */
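/*
 * Register naming is assumed to follow the usual Xilinx PCIe bridge
 * convention: IDR/IMR are the Interrupt Decode/Mask registers, PSCR the
 * PHY Status/Control register, RPSC the Root Port Status/Control
 * register, RPEFR the Root Port Error FIFO Read register, and IDRN the
 * INTx interrupt decode register.
 */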
#define XILINX_CPM_PCIE_REG_IDR		0x00000E10
#define XILINX_CPM_PCIE_REG_IMR		0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR	0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC	0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR	0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN	0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK	0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS	0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL	BIT(1)

/* Interrupt register definitions */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN		0
#define XILINX_CPM_PCIE_INTR_HOT_RESET		3
#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT	4
#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT	8
#define XILINX_CPM_PCIE_INTR_CORRECTABLE	9
#define XILINX_CPM_PCIE_INTR_NONFATAL		10
#define XILINX_CPM_PCIE_INTR_FATAL		11
#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON	12
#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD	15
#define XILINX_CPM_PCIE_INTR_INTX		16
#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD	17
#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP		20
#define XILINX_CPM_PCIE_INTR_SLV_UNEXP		21
#define XILINX_CPM_PCIE_INTR_SLV_COMPL		22
#define XILINX_CPM_PCIE_INTR_SLV_ERRP		23
#define XILINX_CPM_PCIE_INTR_SLV_CMPABT		24
#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR		25
#define XILINX_CPM_PCIE_INTR_MST_DECERR		26
#define XILINX_CPM_PCIE_INTR_MST_SLVERR		27
#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT	28

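/* Turn a symbolic XILINX_CPM_PCIE_INTR_* name into its IMR/IDR bit mask */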
#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK		\
	(					\
		IMR(LINK_DOWN)		|	\
		IMR(HOT_RESET)		|	\
		IMR(CFG_PCIE_TIMEOUT)	|	\
		IMR(CFG_TIMEOUT)	|	\
		IMR(CORRECTABLE)	|	\
		IMR(NONFATAL)		|	\
		IMR(FATAL)		|	\
		IMR(CFG_ERR_POISON)	|	\
		IMR(PME_TO_ACK_RCVD)	|	\
		IMR(INTX)		|	\
		IMR(PM_PME_RCVD)	|	\
		IMR(SLV_UNSUPP)		|	\
		IMR(SLV_UNEXP)		|	\
		IMR(SLV_COMPL)		|	\
		IMR(SLV_ERRP)		|	\
		IMR(SLV_CMPABT)		|	\
		IMR(SLV_ILLBUR)		|	\
		IMR(MST_DECERR)		|	\
		IMR(MST_SLVERR)		|	\
		IMR(SLV_PCIE_TIMEOUT)		\
	)

#define XILINX_CPM_PCIE_IDR_ALL_MASK	0xFFFFFFFF
#define XILINX_CPM_PCIE_IDRN_MASK	GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT	16
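/* INTA..INTD status is reported in IDRN bits 19:16, hence the shift */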

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN	BIT(0)

/* PHY Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP	BIT(11)

/**
 * struct xilinx_cpm_pcie_port - PCIe port information
 * @reg_base: Bridge Register Base
 * @cpm_base: CPM System Level Control and Status Register (SLCR) Base
 * @dev: Device pointer
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer
 * @cfg: Holds mappings of config space window
 * @intx_irq: Legacy interrupt number
 * @irq: Error interrupt number
 * @lock: Lock protecting shared register access
 */
struct xilinx_cpm_pcie_port {
	void __iomem *reg_base;
	void __iomem *cpm_base;
	struct device *dev;
	struct irq_domain *intx_domain;
	struct irq_domain *cpm_domain;
	struct pci_config_window *cfg;
	int intx_irq;
	int irq;
	raw_spinlock_t lock;
};

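/* Relaxed MMIO accessors for the bridge register window (no barriers) */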
static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
{
	return readl_relaxed(port->reg_base + reg);
}

static void pcie_write(struct xilinx_cpm_pcie_port *port,
		       u32 val, u32 reg)
{
	writel_relaxed(val, port->reg_base + reg);
}

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
{
	return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
		XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}

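/*
 * If the Root Port Error FIFO holds a valid entry, log its requester
 * ID and clear the FIFO by writing the register back.
 */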
static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
{
	unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);

	if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
		pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
			   XILINX_CPM_PCIE_REG_RPEFR);
	}
}

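/*
 * INTx mask/unmask: flip the per-INTx enable bit in IDRN_MASK under
 * port->lock so that concurrent read-modify-write cycles cannot race.
 */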
static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_cpm_leg_irq_chip = {
	.name		= "INTx",
	.irq_mask	= xilinx_cpm_mask_leg_irq,
	.irq_unmask	= xilinx_cpm_unmask_leg_irq,
};

/**
 * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
				    unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_cpm_pcie_intx_map,
};

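/*
 * Chained INTx demultiplexer: read the pending INTA..INTD bits out of
 * IDRN (bits 19:16) and invoke the mapped handler for each one.
 */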
static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);

	val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
			pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_irq(irq_find_mapping(port->intx_domain, i));

	chained_irq_exit(chip, desc);
}

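/*
 * Event mask/unmask: flip the event's bit in IMR under port->lock,
 * mirroring the INTx helpers above.
 */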
static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_cpm_event_irq_chip = {
	.name		= "RC-Event",
	.irq_mask	= xilinx_cpm_mask_event_irq,
	.irq_unmask	= xilinx_cpm_unmask_event_irq,
};

static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
				     unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);
	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_cpm_pcie_event_map,
};

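/*
 * Chained event demultiplexer: pending events are IDR & IMR. Each one
 * is handed to its mapped handler and then acknowledged by writing the
 * handled bits back to IDR; the mirrored status in the CPM SLCR block
 * is cleared as well.
 */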
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
	val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_irq(irq_find_mapping(port->cpm_domain, i));
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

	/*
	 * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
	 * CPM SLCR block.
	 */
	val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
	if (val)
		writel_relaxed(val,
			       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

	chained_irq_exit(chip, desc);
}

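/*
 * The _IC() initializer below builds a table mapping each event bit to
 * a symbolic name (used as the IRQ name) and a human-readable cause.
 * Bits without an entry are skipped by xilinx_cpm_setup_irq() and
 * reported as unknown by the interrupt handler.
 */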
#define _IC(x, s) \
	[XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char *sym;
	const char *str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
	_IC(CFG_PCIE_TIMEOUT,	"PCIe ECAM access timeout"),
	_IC(CFG_ERR_POISON,	"ECAM poisoned completion received"),
	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"),
	_IC(PM_PME_RCVD,	"PM_PME message received"),
	_IC(SLV_PCIE_TIMEOUT,	"PCIe completion timeout received"),
};

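/*
 * One handler services every mapped event IRQ: AER-class events drain
 * the Root Port Error FIFO first, and every event is logged by cause.
 */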
static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
	struct xilinx_cpm_pcie_port *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->cpm_domain, irq);

	switch (d->hwirq) {
	case XILINX_CPM_PCIE_INTR_CORRECTABLE:
	case XILINX_CPM_PCIE_INTR_NONFATAL:
	case XILINX_CPM_PCIE_INTR_FATAL:
		cpm_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port)
{
	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (port->cpm_domain) {
		irq_domain_remove(port->cpm_domain);
		port->cpm_domain = NULL;
	}
}

/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
						 &event_domain_ops,
						 port);
	if (!port->cpm_domain)
		goto out;

	irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops,
						  port);
	if (!port->intx_domain)
		goto out;

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
out:
	xilinx_cpm_free_irq_domains(port);
	of_node_put(pcie_intc_node);
	dev_err(dev, "Failed to allocate IRQ domains\n");

	return -ENOMEM;
}

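/*
 * Map a dedicated virq for every named bridge event and request
 * xilinx_cpm_pcie_intr_handler() for it, then install the chained flow
 * handlers: the INTX event cascades into the INTx demux and the
 * top-level platform IRQ cascades into the event demux.
 */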
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->cpm_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
				       0, intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->cpm_domain,
					    XILINX_CPM_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq,
					 xilinx_cpm_pcie_intx_flow, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq,
					 xilinx_cpm_pcie_event_flow, port);

	return 0;
}

/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
{
	if (cpm_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
		   XILINX_CPM_PCIE_IMR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IDR);

	/*
	 * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
	 * CPM SLCR block.
	 */
	writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
	       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);

	/* Set the Bridge Enable bit */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
		   XILINX_CPM_PCIE_REG_RPSC_BEN,
		   XILINX_CPM_PCIE_REG_RPSC);
}

/**
 * xilinx_cpm_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port,
				    struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
							       "cpm_slcr");
	if (IS_ERR(port->cpm_base))
		return PTR_ERR(port->cpm_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!res)
		return -ENXIO;

	port->cfg = pci_ecam_create(dev, res, bus_range,
				    &pci_generic_ecam_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

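	/* The bridge control registers are reached through the ECAM window */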
	port->reg_base = port->cfg->win;

	return 0;
}

static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port)
{
	irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}

/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
	struct xilinx_cpm_pcie_port *port;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	err = xilinx_cpm_pcie_init_irq_domain(port);
	if (err)
		return err;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus) {
		/* Don't leak the IRQ domains allocated just above */
		err = -ENODEV;
		goto err_parse_dt;
	}

	err = xilinx_cpm_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		goto err_parse_dt;
	}

	xilinx_cpm_pcie_init_port(port);

	err = xilinx_cpm_setup_irq(port);
	if (err) {
		dev_err(dev, "Failed to set up interrupts\n");
		goto err_setup_irq;
	}

	bridge->sysdata = port->cfg;
	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_cpm_free_interrupts(port);
err_setup_irq:
	pci_ecam_free(port->cfg);
err_parse_dt:
	xilinx_cpm_free_irq_domains(port);
	return err;
}

static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
	{ .compatible = "xlnx,versal-cpm-host-1.00", },
	{}
};

static struct platform_driver xilinx_cpm_pcie_driver = {
	.driver = {
		.name = "xilinx-cpm-pcie",
		.of_match_table = xilinx_cpm_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_cpm_pcie_probe,
};

builtin_platform_driver(xilinx_cpm_pcie_driver);