^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * PCIe endpoint controller driver for UniPhier SoCs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright 2018 Socionext Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "pcie-designware.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) /* Link Glue registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define PCL_RSTCTRL0 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define PCL_RSTCTRL_AXI_REG BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define PCL_RSTCTRL_AXI_SLAVE BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define PCL_RSTCTRL_AXI_MASTER BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define PCL_RSTCTRL_PIPE3 BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define PCL_RSTCTRL1 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define PCL_RSTCTRL_PERST BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define PCL_RSTCTRL2 0x0024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define PCL_RSTCTRL_PHY_RESET BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define PCL_MODE 0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define PCL_MODE_REGEN BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define PCL_MODE_REGVAL BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define PCL_APP_CLK_CTRL 0x8004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define PCL_APP_CLK_REQ BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define PCL_APP_READY_CTRL 0x8008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define PCL_APP_LTSSM_ENABLE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define PCL_APP_MSI0 0x8040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define PCL_APP_MSI1 0x8044
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define PCL_APP_MSI_REQ BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define PCL_APP_INTX 0x8074
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define PCL_APP_INTX_SYS_INT BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* assertion time of INTx in usec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define PCL_INTX_WIDTH_USEC 30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
/* Driver private state; embeds the DesignWare PCIe core instance. */
struct uniphier_pcie_ep_priv {
	void __iomem *base;			/* link glue registers (PCL_*) */
	struct dw_pcie pci;			/* DesignWare core data */
	struct clk *clk, *clk_gio;		/* "link" and "gio" clocks */
	struct reset_control *rst, *rst_gio;	/* "link" and "gio" resets */
	struct phy *phy;			/* optional "pcie-phy" */
	const struct pci_epc_features *features; /* per-SoC EPC features (match data) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) val = readl(priv->base + PCL_APP_READY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) val |= PCL_APP_LTSSM_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) val &= ~PCL_APP_LTSSM_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) writel(val, priv->base + PCL_APP_READY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) bool assert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) val = readl(priv->base + PCL_RSTCTRL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) if (assert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) val |= PCL_RSTCTRL_PHY_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) val &= ~PCL_RSTCTRL_PHY_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) writel(val, priv->base + PCL_RSTCTRL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
/*
 * uniphier_pcie_init_ep - configure the link glue logic for endpoint mode
 * @priv: driver private data
 *
 * Selects EP mode, drops the application clock request, releases the
 * PIPE3 and AXI resets, and keeps LTSSM disabled until the EPC core
 * calls uniphier_pcie_start_link().  The register write order follows
 * the hardware bring-up sequence and must be preserved.
 */
static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
{
	u32 val;

	/* set EP mode */
	val = readl(priv->base + PCL_MODE);
	val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
	writel(val, priv->base + PCL_MODE);

	/* clock request */
	val = readl(priv->base + PCL_APP_CLK_CTRL);
	val &= ~PCL_APP_CLK_REQ;
	writel(val, priv->base + PCL_APP_CLK_CTRL);

	/* deassert PIPE3 and AXI reset */
	val = readl(priv->base + PCL_RSTCTRL0);
	val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE
		| PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3;
	writel(val, priv->base + PCL_RSTCTRL0);

	/* keep the link down until start_link is requested */
	uniphier_pcie_ltssm_enable(priv, false);

	/* settle time after mode/reset changes -- TODO confirm required value */
	msleep(100);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) static int uniphier_pcie_start_link(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) uniphier_pcie_ltssm_enable(priv, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) static void uniphier_pcie_stop_link(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) uniphier_pcie_ltssm_enable(priv, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) enum pci_barno bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) for (bar = BAR_0; bar <= BAR_5; bar++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) dw_pcie_ep_reset_bar(pci, bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * This makes pulse signal to send INTx to the RC, so this should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * be cleared as soon as possible. This sequence is covered with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * mutex in pci_epc_raise_irq().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) /* assert INTx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) val = readl(priv->base + PCL_APP_INTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) val |= PCL_APP_INTX_SYS_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) writel(val, priv->base + PCL_APP_INTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) udelay(PCL_INTX_WIDTH_USEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) /* deassert INTx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) val &= ~PCL_APP_INTX_SYS_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) writel(val, priv->base + PCL_APP_INTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) u8 func_no, u16 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) writel(val, priv->base + PCL_APP_MSI0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) val = readl(priv->base + PCL_APP_MSI1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) val |= PCL_APP_MSI_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) writel(val, priv->base + PCL_APP_MSI1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) enum pci_epc_irq_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) u16 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) case PCI_EPC_IRQ_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) return uniphier_pcie_ep_raise_legacy_irq(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) case PCI_EPC_IRQ_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) dev_err(pci->dev, "UNKNOWN IRQ type (%d)\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) static const struct pci_epc_features*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) uniphier_pcie_get_features(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) return priv->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
/* Endpoint hooks handed to the DesignWare EP core. */
static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
	.ep_init = uniphier_pcie_ep_init,
	.raise_irq = uniphier_pcie_ep_raise_irq,
	.get_features = uniphier_pcie_get_features,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static int uniphier_add_pcie_ep(struct uniphier_pcie_ep_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) struct dw_pcie *pci = &priv->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct dw_pcie_ep *ep = &pci->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) ep->ops = &uniphier_pcie_ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) if (IS_ERR(pci->dbi_base2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) return PTR_ERR(pci->dbi_base2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) ep->phys_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) ep->addr_size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) ret = dw_pcie_ep_init(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) dev_err(dev, "Failed to initialize endpoint (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
/*
 * uniphier_pcie_ep_enable - power up the controller and PHY
 * @priv: driver private data
 *
 * Enables the "link"/"gio" clocks, releases the matching resets,
 * programs the glue logic for EP mode, and initializes the PHY while
 * holding it in reset.  On failure everything acquired so far is
 * rolled back in reverse order via the goto ladder.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
{
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(priv->clk_gio);
	if (ret)
		goto out_clk_disable;

	ret = reset_control_deassert(priv->rst);
	if (ret)
		goto out_clk_gio_disable;

	ret = reset_control_deassert(priv->rst_gio);
	if (ret)
		goto out_rst_assert;

	uniphier_pcie_init_ep(priv);

	/* hold the PHY in reset across phy_init() */
	uniphier_pcie_phy_reset(priv, true);

	ret = phy_init(priv->phy);
	if (ret)
		goto out_rst_gio_assert;

	uniphier_pcie_phy_reset(priv, false);

	return 0;

out_rst_gio_assert:
	reset_control_assert(priv->rst_gio);
out_rst_assert:
	reset_control_assert(priv->rst);
out_clk_gio_disable:
	clk_disable_unprepare(priv->clk_gio);
out_clk_disable:
	clk_disable_unprepare(priv->clk);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
/* Core callbacks used by the DesignWare driver to control the link. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = uniphier_pcie_start_link,
	.stop_link = uniphier_pcie_stop_link,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
/*
 * uniphier_pcie_ep_probe - bind the driver to a UniPhier PCIe EP instance
 * @pdev: platform device carrying the DT resources
 *
 * Gathers all DT-described resources (register regions, clocks, resets,
 * optional PHY), powers the controller up, and registers it with the
 * DesignWare endpoint framework.  All resources are devm-managed, so
 * early error returns need no explicit cleanup.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int uniphier_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_pcie_ep_priv *priv;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* per-SoC EPC features come from the OF match table */
	priv->features = of_device_get_match_data(dev);
	if (WARN_ON(!priv->features))
		return -EINVAL;

	priv->pci.dev = dev;
	priv->pci.ops = &dw_pcie_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(priv->pci.dbi_base))
		return PTR_ERR(priv->pci.dbi_base);

	/* "link" region holds the PCL_* glue registers */
	priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->clk_gio = devm_clk_get(dev, "gio");
	if (IS_ERR(priv->clk_gio))
		return PTR_ERR(priv->clk_gio);

	/* "gio"/"link" resets are shared with other blocks on the SoC */
	priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
	if (IS_ERR(priv->rst_gio))
		return PTR_ERR(priv->rst_gio);

	priv->clk = devm_clk_get(dev, "link");
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	priv->rst = devm_reset_control_get_shared(dev, "link");
	if (IS_ERR(priv->rst))
		return PTR_ERR(priv->rst);

	/* PHY is optional; a missing DT phandle yields a NULL no-op phy */
	priv->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(priv->phy)) {
		ret = PTR_ERR(priv->phy);
		dev_err(dev, "Failed to get phy (%d)\n", ret);
		return ret;
	}

	/* drvdata must be set before to_uniphier_pcie() users run */
	platform_set_drvdata(pdev, priv);

	ret = uniphier_pcie_ep_enable(priv);
	if (ret)
		return ret;

	return uniphier_add_pcie_ep(priv, pdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
/* EPC features advertised for the UniPhier Pro5 SoC. */
static const struct pci_epc_features uniphier_pro5_data = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.align = 1 << 16,	/* 64 KiB mapping alignment */
	.bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
	.reserved_bar = BIT(BAR_4),	/* BAR4 reserved -- presumably used internally; TODO confirm */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
/* DT match table; .data supplies the per-SoC pci_epc_features. */
static const struct of_device_id uniphier_pcie_ep_match[] = {
	{
		.compatible = "socionext,uniphier-pro5-pcie-ep",
		.data = &uniphier_pro5_data,
	},
	{ /* sentinel */ },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
/* Built-in only (no module unload); manual unbind is suppressed too. */
static struct platform_driver uniphier_pcie_ep_driver = {
	.probe = uniphier_pcie_ep_probe,
	.driver = {
		.name = "uniphier-pcie-ep",
		.of_match_table = uniphier_pcie_ep_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(uniphier_pcie_ep_driver);