^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright Altera Corporation (C) 2013-2015. All rights reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Author: Ley Foon Tan <lftan@altera.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Description: Altera PCIe host controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/irqchip/chained_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/of_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define RP_TX_REG0 0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define RP_TX_REG1 0x2004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define RP_TX_CNTRL 0x2008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define RP_TX_EOP 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define RP_TX_SOP 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define RP_RXCPL_STATUS 0x2010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define RP_RXCPL_EOP 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define RP_RXCPL_SOP 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define RP_RXCPL_REG0 0x2014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define RP_RXCPL_REG1 0x2018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define P2A_INT_STATUS 0x3060
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define P2A_INT_STS_ALL 0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define P2A_INT_ENABLE 0x3070
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define P2A_INT_ENA_ALL 0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define RP_LTSSM 0x3c64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define RP_LTSSM_MASK 0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define LTSSM_L0 0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define S10_RP_TX_CNTRL 0x2004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define S10_RP_RXCPL_REG 0x2008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define S10_RP_RXCPL_STATUS 0x200C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define S10_RP_CFG_ADDR(pcie, reg) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) (((pcie)->hip_base) + (reg) + (1 << 20))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define S10_RP_SECONDARY(pcie) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) /* TLP configuration type 0 and 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define TLP_PAYLOAD_SIZE 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define TLP_READ_TAG 0x1d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define TLP_WRITE_TAG 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define RP_DEVFN 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define TLP_CFG_DW0(pcie, cfg) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) (((cfg) << 24) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) TLP_PAYLOAD_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define TLP_CFG_DW1(pcie, tag, be) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define TLP_CFG_DW2(bus, devfn, offset) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) (((bus) << 24) | ((devfn) << 16) | (offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define TLP_BYTE_COUNT(s) (((s) >> 0) & 0xfff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define TLP_HDR_SIZE 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define TLP_LOOP 500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define LINK_UP_TIMEOUT HZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define LINK_RETRAIN_TIMEOUT HZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define DWORD_MASK 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define S10_TLP_FMTTYPE_CFGRD0 0x05
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define S10_TLP_FMTTYPE_CFGRD1 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define S10_TLP_FMTTYPE_CFGWR0 0x45
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define S10_TLP_FMTTYPE_CFGWR1 0x44
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) enum altera_pcie_version {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) ALTERA_PCIE_V1 = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) ALTERA_PCIE_V2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct altera_pcie {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct platform_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) void __iomem *cra_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) void __iomem *hip_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) u8 root_bus_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) struct irq_domain *irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct resource bus_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) const struct altera_pcie_data *pcie_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) struct altera_pcie_ops {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) u32 data, bool align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) bool (*get_link_status)(struct altera_pcie *pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) int (*rp_read_cfg)(struct altera_pcie *pcie, int where,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) int size, u32 *value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) int where, int size, u32 value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct altera_pcie_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) const struct altera_pcie_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) enum altera_pcie_version version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) u32 cap_offset; /* PCIe capability structure register offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) u32 cfgrd0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) u32 cfgrd1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) u32 cfgwr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) u32 cfgwr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) struct tlp_rp_regpair_t {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) u32 reg0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) u32 reg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) const u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) writel_relaxed(value, pcie->cra_base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) return readl_relaxed(pcie->cra_base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) static bool altera_pcie_link_up(struct altera_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) void __iomem *addr = S10_RP_CFG_ADDR(pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) pcie->pcie_data->cap_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) PCI_EXP_LNKSTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * Altera PCIe port uses BAR0 of RC's configuration space as the translation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * using these registers, so it can be reached by DMA from EP devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * should be hidden during enumeration to avoid the sizing and resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * allocation by PCIe core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) if (pci_is_root_bus(bus) && (devfn == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) (offset == PCI_BASE_ADDRESS_0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) static void tlp_write_tx(struct altera_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct tlp_rp_regpair_t *tlp_rp_regdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) cra_writel(pcie, reg0, RP_TX_REG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) cra_writel(pcie, ctrl, S10_RP_TX_CNTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static bool altera_pcie_valid_device(struct altera_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct pci_bus *bus, int dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /* If there is no link, then there is no device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (bus->number != pcie->root_bus_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) if (!pcie->pcie_data->ops->get_link_status(pcie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /* access only one slot on each root port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) if (bus->number == pcie->root_bus_nr && dev > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) bool sop = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) u32 reg0, reg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) u32 comp_status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * Minimum 2 loops to read TLP headers and 1 loop to read data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * payload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) for (i = 0; i < TLP_LOOP; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) reg0 = cra_readl(pcie, RP_RXCPL_REG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) reg1 = cra_readl(pcie, RP_RXCPL_REG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if (ctrl & RP_RXCPL_SOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) sop = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) comp_status = TLP_COMP_STATUS(reg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) if (ctrl & RP_RXCPL_EOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) if (comp_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) if (value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) *value = reg0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) u32 comp_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) u32 dw[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct device *dev = &pcie->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) for (count = 0; count < TLP_LOOP; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (ctrl & RP_RXCPL_SOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) /* Read first DW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /* SOP detection failed, return error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (count == TLP_LOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) /* Poll for EOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) while (count < ARRAY_SIZE(dw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (ctrl & RP_RXCPL_EOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) comp_status = TLP_COMP_STATUS(dw[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) if (comp_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) count == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) *value = dw[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) dev_warn(dev, "Malformed TLP packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) u32 data, bool align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) struct tlp_rp_regpair_t tlp_rp_regdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) tlp_rp_regdata.reg0 = headers[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) tlp_rp_regdata.reg1 = headers[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) tlp_rp_regdata.ctrl = RP_TX_SOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) tlp_write_tx(pcie, &tlp_rp_regdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (align) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) tlp_rp_regdata.reg0 = headers[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) tlp_rp_regdata.reg1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) tlp_rp_regdata.ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) tlp_write_tx(pcie, &tlp_rp_regdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) tlp_rp_regdata.reg0 = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) tlp_rp_regdata.reg1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) tlp_rp_regdata.reg0 = headers[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) tlp_rp_regdata.reg1 = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) tlp_rp_regdata.ctrl = RP_TX_EOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) tlp_write_tx(pcie, &tlp_rp_regdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) u32 data, bool dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) s10_tlp_write_tx(pcie, headers[1], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) s10_tlp_write_tx(pcie, headers[2], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) s10_tlp_write_tx(pcie, data, RP_TX_EOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) int where, u8 byte_en, bool read, u32 *headers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) u8 cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) if (pcie->pcie_data->version == ALTERA_PCIE_V1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) headers[0] = TLP_CFG_DW0(pcie, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) headers[1] = TLP_CFG_DW1(pcie, tag, byte_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) headers[2] = TLP_CFG_DW2(bus, devfn, where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) int where, u8 byte_en, u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) u32 headers[TLP_HDR_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) get_tlp_header(pcie, bus, devfn, where, byte_en, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) headers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) return pcie->pcie_data->ops->tlp_read_pkt(pcie, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) int where, u8 byte_en, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) u32 headers[TLP_HDR_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) get_tlp_header(pcie, bus, devfn, where, byte_en, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) headers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /* check alignment to Qword */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) if ((where & 0x7) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) value, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) value, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (ret != PCIBIOS_SUCCESSFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * Monitor changes to PCI_PRIMARY_BUS register on root port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * and update local copy of root bus number accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) pcie->root_bus_nr = (u8)(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static int s10_rp_read_cfg(struct altera_pcie *pcie, int where,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) int size, u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) *value = readb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) *value = readw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) *value = readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) int where, int size, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) writeb(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) writew(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) writel(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * Monitor changes to PCI_PRIMARY_BUS register on root port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) * and update local copy of root bus number accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) pcie->root_bus_nr = value & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) unsigned int devfn, int where, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) u8 byte_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) size, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) byte_en = 1 << (where & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) byte_en = 3 << (where & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) byte_en = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) ret = tlp_cfg_dword_read(pcie, busno, devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) (where & ~DWORD_MASK), byte_en, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (ret != PCIBIOS_SUCCESSFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) *value = (data >> (8 * (where & 0x3))) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) *value = (data >> (8 * (where & 0x2))) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) *value = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) unsigned int devfn, int where, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) u32 data32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) u32 shift = 8 * (where & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) u8 byte_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) where, size, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) data32 = (value & 0xff) << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) byte_en = 1 << (where & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) data32 = (value & 0xffff) << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) byte_en = 3 << (where & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) data32 = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) byte_en = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) byte_en, data32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) int where, int size, u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct altera_pcie *pcie = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (altera_pcie_hide_rc_bar(bus, devfn, where))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) *value = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) int where, int size, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) struct altera_pcie *pcie = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (altera_pcie_hide_rc_bar(bus, devfn, where))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
/* Config-space accessors handed to the generic PCI core via the bridge. */
static struct pci_ops altera_pcie_ops = {
	.read = altera_pcie_cfg_read,
	.write = altera_pcie_cfg_write,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) unsigned int devfn, int offset, u16 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) ret = _altera_pcie_cfg_read(pcie, busno, devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) pcie->pcie_data->cap_offset + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) sizeof(*value),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) *value = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) unsigned int devfn, int offset, u16 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) return _altera_pcie_cfg_write(pcie, busno, devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) pcie->pcie_data->cap_offset + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) sizeof(value),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static void altera_wait_link_retrain(struct altera_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct device *dev = &pcie->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) u16 reg16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) unsigned long start_jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /* Wait for link training end. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) start_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) PCI_EXP_LNKSTA, ®16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (!(reg16 & PCI_EXP_LNKSTA_LT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) dev_err(dev, "link retrain timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) /* Wait for link is up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) start_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (pcie->pcie_data->ops->get_link_status(pcie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) dev_err(dev, "link up timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) static void altera_pcie_retrain(struct altera_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) u16 linkcap, linkstat, linkctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (!pcie->pcie_data->ops->get_link_status(pcie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) * current speed is 2.5 GB/s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) &linkcap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) &linkstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) PCI_EXP_LNKCTL, &linkctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) linkctl |= PCI_EXP_LNKCTL_RL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) PCI_EXP_LNKCTL, linkctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) altera_wait_link_retrain(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
/*
 * irq_domain .map callback: associate each INTx virq with a simple
 * handler on the dummy chip (the controller needs no per-IRQ mask/ack
 * beyond the status-register handling done in altera_pcie_isr()) and
 * stash the driver state as chip data.
 */
static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
/* Domain ops for the legacy INTx domain; standard DT INTx translation. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = altera_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
/*
 * Chained handler for the top-level controller interrupt.  Demultiplexes
 * the INTA..INTD bits of P2A_INT_STATUS onto the INTx irq_domain.
 *
 * Each status bit is acknowledged (write-one-to-clear) BEFORE its virq
 * is dispatched, and the status register is re-read in the outer loop
 * until no INTx bits remain, so interrupts raised while handling are
 * not lost.  Keep that ordering.
 */
static void altera_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct altera_pcie *pcie;
	struct device *dev;
	unsigned long status;
	u32 bit;
	u32 virq;

	chained_irq_enter(chip, desc);
	pcie = irq_desc_get_handler_data(desc);
	dev = &pcie->pdev->dev;

	while ((status = cra_readl(pcie, P2A_INT_STATUS)
		& P2A_INT_STS_ALL) != 0) {
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			/* clear interrupts */
			cra_writel(pcie, 1 << bit, P2A_INT_STATUS);

			virq = irq_find_mapping(pcie->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
			else
				dev_err(dev, "unexpected IRQ, INT%d\n", bit);
		}
	}

	chained_irq_exit(chip, desc);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct device *dev = &pcie->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct device_node *node = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /* Setup INTx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) &intx_domain_ops, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if (!pcie->irq_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) dev_err(dev, "Failed to get a INTx IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
/*
 * Undo altera_pcie_parse_dt()/altera_pcie_init_irq_domain(): detach the
 * chained handler first so no demux runs while the domain is being torn
 * down, then remove the INTx domain and release the top-level mapping.
 */
static void altera_pcie_irq_teardown(struct altera_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
	irq_domain_remove(pcie->irq_domain);
	irq_dispose_mapping(pcie->irq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) static int altera_pcie_parse_dt(struct altera_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct platform_device *pdev = pcie->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) pcie->cra_base = devm_platform_ioremap_resource_byname(pdev, "Cra");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (IS_ERR(pcie->cra_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) return PTR_ERR(pcie->cra_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) pcie->hip_base =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) devm_platform_ioremap_resource_byname(pdev, "Hip");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (IS_ERR(pcie->hip_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return PTR_ERR(pcie->hip_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /* setup IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) pcie->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (pcie->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return pcie->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
/* One-time host bring-up: retrain the link to its best supported speed. */
static void altera_pcie_host_init(struct altera_pcie *pcie)
{
	altera_pcie_retrain(pcie);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
/* V1 controller hooks; no rp_read_cfg/rp_write_cfg, so Root Port config
 * accesses go through the generic TLP path. */
static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
	.tlp_read_pkt = tlp_read_packet,
	.tlp_write_pkt = tlp_write_packet,
	.get_link_status = altera_pcie_link_up,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
/* V2 (Stratix 10) controller hooks, including direct Root Port
 * config-space accessors. */
static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
	.tlp_read_pkt = s10_tlp_read_packet,
	.tlp_write_pkt = s10_tlp_write_packet,
	.get_link_status = s10_altera_pcie_link_up,
	.rp_read_cfg = s10_rp_read_cfg,
	.rp_write_cfg = s10_rp_write_cfg,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
/* V1 parameters: PCIe capability at config offset 0x80, standard TLP
 * fmt/type codes for config reads/writes. */
static const struct altera_pcie_data altera_pcie_1_0_data = {
	.ops = &altera_pcie_ops_1_0,
	.cap_offset = 0x80,
	.version = ALTERA_PCIE_V1,
	.cfgrd0 = TLP_FMTTYPE_CFGRD0,
	.cfgrd1 = TLP_FMTTYPE_CFGRD1,
	.cfgwr0 = TLP_FMTTYPE_CFGWR0,
	.cfgwr1 = TLP_FMTTYPE_CFGWR1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
/* V2 (Stratix 10) parameters: capability at 0x70, S10-specific TLP
 * fmt/type codes. */
static const struct altera_pcie_data altera_pcie_2_0_data = {
	.ops = &altera_pcie_ops_2_0,
	.version = ALTERA_PCIE_V2,
	.cap_offset = 0x70,
	.cfgrd0 = S10_TLP_FMTTYPE_CFGRD0,
	.cfgrd1 = S10_TLP_FMTTYPE_CFGRD1,
	.cfgwr0 = S10_TLP_FMTTYPE_CFGWR0,
	.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static const struct of_device_id altera_pcie_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {.compatible = "altr,pcie-root-port-1.0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) .data = &altera_pcie_1_0_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) {.compatible = "altr,pcie-root-port-2.0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) .data = &altera_pcie_2_0_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static int altera_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct altera_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct pci_host_bridge *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) pcie = pci_host_bridge_priv(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) pcie->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) platform_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) match = of_match_device(altera_pcie_of_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (!match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) pcie->pcie_data = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ret = altera_pcie_parse_dt(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dev_err(dev, "Parsing DT failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ret = altera_pcie_init_irq_domain(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) dev_err(dev, "Failed creating IRQ Domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /* clear all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /* enable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) altera_pcie_host_init(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) bridge->sysdata = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) bridge->busnr = pcie->root_bus_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) bridge->ops = &altera_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return pci_host_probe(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
/*
 * Remove: stop and unbind all devices on the root bus before removing
 * it, then tear the IRQ plumbing down last so no interrupt can be
 * demultiplexed into a half-removed bus.
 */
static int altera_pcie_remove(struct platform_device *pdev)
{
	struct altera_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	altera_pcie_irq_teardown(pcie);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
/* Platform driver glue; matched against altera_pcie_of_match via DT. */
static struct platform_driver altera_pcie_driver = {
	.probe = altera_pcie_probe,
	.remove = altera_pcie_remove,
	.driver = {
		.name = "altera-pcie",
		.of_match_table = altera_pcie_of_match,
	},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
/* Export the DT match table for module autoloading and register the driver. */
MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
module_platform_driver(altera_pcie_driver);
MODULE_LICENSE("GPL v2");