// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe Gen4 host controller driver for NXP Layerscape SoCs
 *
 * Copyright 2019-2020 NXP
 *
 * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pcie-mobiveil.h"

/* LUT and PF control registers */
#define PCIE_LUT_OFF		0x80000
#define PCIE_PF_OFF		0xc0000
#define PCIE_PF_INT_STAT	0x18
#define PF_INT_STAT_PABRST	BIT(31)

#define PCIE_PF_DBG		0x7fc
#define PF_DBG_LTSSM_MASK	0x3f
#define PF_DBG_LTSSM_L0		0x2d /* L0 state */
#define PF_DBG_WE		BIT(31)
#define PF_DBG_PABR		BIT(27)

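/*
 * The Mobiveil core embeds the platform device; its drvdata is set to the
 * ls_pcie_g4 wrapper in probe, so this maps a mobiveil_pcie back to it.
 */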
#define to_ls_pcie_g4(x)	platform_get_drvdata((x)->pdev)

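/**
 * struct ls_pcie_g4 - LS Gen4 PCIe controller private data
 * @pci:   common Mobiveil PCIe host instance
 * @dwork: deferred work that reinitializes the link after a hot reset
 * @irq:   "intr" platform interrupt for PAB miscellaneous events
 */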
struct ls_pcie_g4 {
	struct mobiveil_pcie pci;
	struct delayed_work dwork;
	int irq;
};

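/*
 * The LUT and PF control register blocks live at fixed offsets inside the
 * controller CSR space; these helpers address them relative to the common
 * csr_axi_slave_base mapping.
 */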
static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
{
	return ioread32(pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
}

static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
					 u32 off, u32 val)
{
	iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
}

static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
{
	return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}

static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
					u32 off, u32 val)
{
	iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}

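/* The link is up once the LTSSM has reached the L0 state */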
static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
{
	struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
	u32 state;

	state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	state = state & PF_DBG_LTSSM_MASK;

	if (state == PF_DBG_LTSSM_L0)
		return 1;

	return 0;
}

static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;

	mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
}

static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u32 val;

	/* Clear the interrupt status */
	mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);

	val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
	      PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
	mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
}

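/*
 * Bring the PAB back up after a hot reset: wait for the reset to latch and
 * PAB activity to drain, trigger the PAB bridge reset via the PF debug
 * register, then redo the common host init and wait for link-up.
 */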
static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	struct device *dev = &mv_pci->pdev->dev;
	u32 val, act_stat;
	int to = 100;

	/* Poll for pab_csb_reset to set and PAB activity to clear */
	do {
		usleep_range(10, 15);
		val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
		act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
	} while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
	if (to < 0) {
		dev_err(dev, "Poll PABRST&PABACT timeout\n");
		return -EIO;
	}

	/* clear PEX_RESET bit in PEX_PF0_DBG register */
	val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_WE;
	ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);

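	/* Set the PAB reset (PABR) bit while write access is enabled */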
	val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_PABR;
	ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);

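	/* Clear the write-enable bit again */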
	val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	val &= ~PF_DBG_WE;
	ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);

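	/*
	 * Redo the common Mobiveil host init; the second argument requests
	 * the core driver's reinit path.
	 */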
	mobiveil_host_init(mv_pci, true);

	to = 100;
	while (!ls_pcie_g4_link_up(mv_pci) && to--)
		usleep_range(200, 250);
	if (to < 0) {
		dev_err(dev, "PCIe link training timeout\n");
		return -EIO;
	}

	return 0;
}

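/*
 * Shared handler for the "intr" line: acknowledge the pending PAB
 * miscellaneous events by writing the status back, and kick off the
 * deferred reinitialization on hot reset.
 */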
static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
{
	struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u32 val;

	val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
	if (!val)
		return IRQ_NONE;

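	/*
	 * A hot reset tears the link down; mask further events and defer
	 * the (sleeping) reinitialization to the workqueue.
	 */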
	if (val & PAB_INTP_RESET) {
		ls_pcie_g4_disable_interrupt(pcie);
		schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
	}

	mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);

	return IRQ_HANDLED;
}

static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
{
	struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
	struct platform_device *pdev = mv_pci->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	pcie->irq = platform_get_irq_byname(pdev, "intr");
	if (pcie->irq < 0)
		return pcie->irq;

	ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr,
			       IRQF_SHARED, pdev->name, pcie);
	if (ret) {
		dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
		return ret;
	}

	return 0;
}

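/*
 * Deferred hot-reset handler: deassert the secondary bus reset in the
 * bridge control register, reinitialize the PAB and, only on success,
 * re-enable the interrupts masked in the ISR.
 */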
static void ls_pcie_g4_reset(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u16 ctrl;

	ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);

	if (ls_pcie_g4_reinit_hw(pcie))
		return;

	ls_pcie_g4_enable_interrupt(pcie);
}

static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
	.interrupt_init = ls_pcie_g4_interrupt_init,
};

static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
	.link_up = ls_pcie_g4_link_up,
};

static int __init ls_pcie_g4_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct mobiveil_pcie *mv_pci;
	struct ls_pcie_g4 *pcie;
	struct device_node *np = dev->of_node;
	int ret;

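	/*
	 * MSIs are delivered by an external interrupt controller on these
	 * SoCs (e.g. the GIC ITS on LX2160A), so an msi-parent phandle is
	 * required in the device tree node.
	 */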
	if (!of_parse_phandle(np, "msi-parent", 0)) {
		dev_err(dev, "Failed to find msi-parent\n");
		return -EINVAL;
	}

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	mv_pci = &pcie->pci;

	mv_pci->pdev = pdev;
	mv_pci->ops = &ls_pcie_g4_pab_ops;
	mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
	mv_pci->rp.bridge = bridge;

	platform_set_drvdata(pdev, pcie);

	INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);

	ret = mobiveil_pcie_host_probe(mv_pci);
	if (ret) {
		dev_err(dev, "Failed to probe\n");
		return ret;
	}

	ls_pcie_g4_enable_interrupt(pcie);

	return 0;
}

static const struct of_device_id ls_pcie_g4_of_match[] = {
	{ .compatible = "fsl,lx2160a-pcie", },
	{ },
};

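/*
 * The driver is built in with an __init probe (see
 * builtin_platform_driver_probe() below), so bind/unbind attributes are
 * suppressed: the probe routine is discarded after boot.
 */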
static struct platform_driver ls_pcie_g4_driver = {
	.driver = {
		.name = "layerscape-pcie-gen4",
		.of_match_table = ls_pcie_g4_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);