Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * PCIe host controller driver for Samsung Exynos SoCs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *		https://www.samsung.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Author: Jingoo Han <jg1.han@samsung.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/of_gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/resource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include "pcie-designware.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #define to_exynos_pcie(x)	dev_get_drvdata((x)->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) /* PCIe ELBI registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #define PCIE_IRQ_PULSE			0x000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #define IRQ_INTA_ASSERT			BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #define IRQ_INTB_ASSERT			BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #define IRQ_INTC_ASSERT			BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #define IRQ_INTD_ASSERT			BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #define PCIE_IRQ_LEVEL			0x004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #define PCIE_IRQ_SPECIAL		0x008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #define PCIE_IRQ_EN_PULSE		0x00c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #define PCIE_IRQ_EN_LEVEL		0x010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #define IRQ_MSI_ENABLE			BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #define PCIE_IRQ_EN_SPECIAL		0x014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #define PCIE_PWR_RESET			0x018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #define PCIE_CORE_RESET			0x01c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #define PCIE_CORE_RESET_ENABLE		BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #define PCIE_STICKY_RESET		0x020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #define PCIE_NONSTICKY_RESET		0x024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #define PCIE_APP_INIT_RESET		0x028
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #define PCIE_APP_LTSSM_ENABLE		0x02c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #define PCIE_ELBI_RDLH_LINKUP		0x064
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #define PCIE_ELBI_LTSSM_ENABLE		0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #define PCIE_ELBI_SLV_AWMISC		0x11c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #define PCIE_ELBI_SLV_ARMISC		0x120
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #define PCIE_ELBI_SLV_DBI_ENABLE	BIT(21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
/* Memory-mapped register regions obtained from the device tree. */
struct exynos_pcie_mem_res {
	void __iomem *elbi_base;   /* DT 0th resource: PCIe CTRL */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
/* Clocks required to run the PCIe block. */
struct exynos_pcie_clk_res {
	struct clk *clk;	/* core (RC) clock, DT clock name "pcie" */
	struct clk *bus_clk;	/* bus clock, DT clock name "pcie_bus" */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
/* Per-controller driver state, stored as drvdata (see to_exynos_pcie()). */
struct exynos_pcie {
	struct dw_pcie			*pci;
	struct exynos_pcie_mem_res	*mem_res;
	struct exynos_pcie_clk_res	*clk_res;
	const struct exynos_pcie_ops	*ops;	/* variant hooks from OF match data */
	int				reset_gpio;	/* "reset-gpio" DT property; negative when absent */

	struct phy			*phy;	/* optional PHY; NULL when DT provides none */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
/* Variant-specific resource hooks, selected via the OF match table. */
struct exynos_pcie_ops {
	/* Map register regions from the platform device. */
	int (*get_mem_resources)(struct platform_device *pdev,
			struct exynos_pcie *ep);
	/* Look up clocks. */
	int (*get_clk_resources)(struct exynos_pcie *ep);
	/* Enable clocks. */
	int (*init_clk_resources)(struct exynos_pcie *ep);
	/* Disable clocks. */
	void (*deinit_clk_resources)(struct exynos_pcie *ep);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 					     struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	struct dw_pcie *pci = ep->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	if (!ep->mem_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	ep->mem_res->elbi_base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	if (IS_ERR(ep->mem_res->elbi_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 		return PTR_ERR(ep->mem_res->elbi_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	struct dw_pcie *pci = ep->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	if (!ep->clk_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	ep->clk_res->clk = devm_clk_get(dev, "pcie");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	if (IS_ERR(ep->clk_res->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		dev_err(dev, "Failed to get pcie rc clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		return PTR_ERR(ep->clk_res->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	if (IS_ERR(ep->clk_res->bus_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 		dev_err(dev, "Failed to get pcie bus clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		return PTR_ERR(ep->clk_res->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	struct dw_pcie *pci = ep->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	ret = clk_prepare_enable(ep->clk_res->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		dev_err(dev, "cannot enable pcie rc clock");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	ret = clk_prepare_enable(ep->clk_res->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		dev_err(dev, "cannot enable pcie bus clock");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 		goto err_bus_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) err_bus_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	clk_disable_unprepare(ep->clk_res->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
/* Disable clocks in reverse order of exynos5440_pcie_init_clk_resources(). */
static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
{
	clk_disable_unprepare(ep->clk_res->bus_clk);
	clk_disable_unprepare(ep->clk_res->clk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
/* Exynos5440 variant hooks; referenced from the OF match data. */
static const struct exynos_pcie_ops exynos5440_pcie_ops = {
	.get_mem_resources	= exynos5440_pcie_get_mem_resources,
	.get_clk_resources	= exynos5440_pcie_get_clk_resources,
	.init_clk_resources	= exynos5440_pcie_init_clk_resources,
	.deinit_clk_resources	= exynos5440_pcie_deinit_clk_resources,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
/* Write @val to the 32-bit register at @base + @reg. */
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
	writel(val, base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
/* Read the 32-bit register at @base + @reg. */
static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		val |= PCIE_ELBI_SLV_DBI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		val |= PCIE_ELBI_SLV_DBI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	val &= ~PCIE_CORE_RESET_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	val |= PCIE_CORE_RESET_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	struct dw_pcie *pci = ep->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	if (ep->reset_gpio >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		devm_gpio_request_one(dev, ep->reset_gpio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 				GPIOF_OUT_INIT_HIGH, "RESET");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
/*
 * Bring up the PCIe link: reset the core and PHY, configure the root
 * complex, release the endpoint reset and enable LTSSM, then wait for
 * link training to complete.
 *
 * Returns 0 when the link is up (or was already up), -ETIMEDOUT when
 * training does not finish within dw_pcie_wait_for_link()'s timeout.
 *
 * NOTE(review): the sequence of register writes and phy_* calls below
 * is order-sensitive hardware bring-up; do not reorder.
 */
static int exynos_pcie_establish_link(struct exynos_pcie *ep)
{
	struct dw_pcie *pci = ep->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "Link already up\n");
		return 0;
	}

	exynos_pcie_assert_core_reset(ep);

	/* NOTE(review): phy_* return values are ignored; a failing PHY is
	 * only detected indirectly via the link-up timeout below. */
	phy_reset(ep->phy);

	exynos_pcie_writel(ep->mem_res->elbi_base, 1,
			PCIE_PWR_RESET);

	phy_power_on(ep->phy);
	phy_init(ep->phy);

	exynos_pcie_deassert_core_reset(ep);
	dw_pcie_setup_rc(pp);
	exynos_pcie_assert_reset(ep);

	/* assert LTSSM enable */
	exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
			  PCIE_APP_LTSSM_ENABLE);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	phy_power_off(ep->phy);
	return -ETIMEDOUT;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	/* enable INTX interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 		IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
/*
 * Pulse (INTx) interrupt handler: acknowledge the pending bits and
 * return. @arg is the struct exynos_pcie passed to devm_request_irq().
 */
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
	struct exynos_pcie *ep = arg;

	exynos_pcie_clear_irq_pulse(ep);
	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 
/*
 * Initialize DesignWare MSI support and unmask the MSI bit in the
 * ELBI level-interrupt enable register.
 */
static void exynos_pcie_msi_init(struct exynos_pcie *ep)
{
	struct dw_pcie *pci = ep->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val;

	dw_pcie_msi_init(pp);

	/* enable MSI interrupt */
	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
	val |= IRQ_MSI_ENABLE;
	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
/* Enable INTx pulse interrupts and, when configured, MSI. */
static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
{
	exynos_pcie_enable_irq_pulse(ep);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		exynos_pcie_msi_init(ep);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
/*
 * DBI read accessor: the ELBI sideband read mode is enabled around the
 * access so the slave interface targets the DBI space (see
 * PCIE_ELBI_SLV_DBI_ENABLE), then restored.
 */
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
				u32 reg, size_t size)
{
	struct exynos_pcie *ep = to_exynos_pcie(pci);
	u32 val;

	exynos_pcie_sideband_dbi_r_mode(ep, true);
	dw_pcie_read(base + reg, size, &val);
	exynos_pcie_sideband_dbi_r_mode(ep, false);
	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
/*
 * DBI write accessor: mirror of exynos_pcie_read_dbi() — the ELBI
 * sideband write mode is enabled around the access, then restored.
 */
static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
				  u32 reg, size_t size, u32 val)
{
	struct exynos_pcie *ep = to_exynos_pcie(pci);

	exynos_pcie_sideband_dbi_w_mode(ep, true);
	dw_pcie_write(base + reg, size, val);
	exynos_pcie_sideband_dbi_w_mode(ep, false);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 				   int where, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	if (PCI_SLOT(devfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 		*val = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 		return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	*val = dw_pcie_read_dbi(pci, where, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 				   int where, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	if (PCI_SLOT(devfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 		return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	dw_pcie_write_dbi(pci, where, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
/* Root-bus config accessors, routed through the sideband DBI interface. */
static struct pci_ops exynos_pci_ops = {
	.read = exynos_pcie_rd_own_conf,
	.write = exynos_pcie_wr_own_conf,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) static int exynos_pcie_link_up(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	struct exynos_pcie *ep = to_exynos_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	if (val == PCIE_ELBI_LTSSM_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 
/*
 * DesignWare host_init callback: install the root-bus config accessors,
 * bring the link up and enable interrupts.
 *
 * NOTE(review): the return value of exynos_pcie_establish_link() is
 * ignored, so host init reports success even when link training times
 * out — confirm this is intentional (e.g. for empty slots).
 */
static int exynos_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct exynos_pcie *ep = to_exynos_pcie(pci);

	pp->bridge->ops = &exynos_pci_ops;

	exynos_pcie_establish_link(ep);
	exynos_pcie_enable_interrupts(ep);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 
/* Host callbacks handed to the DesignWare core via pp->ops. */
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
	.host_init = exynos_pcie_host_init,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
/*
 * Wire up interrupts and register the DesignWare PCIe host.
 *
 * IRQ index 1 is the pulse (INTx) interrupt serviced by
 * exynos_pcie_irq_handler(); IRQ index 0 is handed to the DesignWare
 * core as the MSI interrupt when CONFIG_PCI_MSI is enabled.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
				       struct platform_device *pdev)
{
	struct dw_pcie *pci = ep->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0)
		return pp->irq;

	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
				IRQF_SHARED, "exynos-pcie", ep);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq(pdev, 0);
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	pp->ops = &exynos_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 
/* DesignWare core callbacks: sideband DBI accessors and link-up test. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.read_dbi = exynos_pcie_read_dbi,
	.write_dbi = exynos_pcie_write_dbi,
	.link_up = exynos_pcie_link_up,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static int __init exynos_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	struct dw_pcie *pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	struct exynos_pcie *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	if (!pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	pci->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	pci->ops = &dw_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	ep->pci = pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	ep->ops = (const struct exynos_pcie_ops *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 		of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	ep->phy = devm_of_phy_get(dev, np, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	if (IS_ERR(ep->phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		if (PTR_ERR(ep->phy) != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 			return PTR_ERR(ep->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 		ep->phy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	if (ep->ops && ep->ops->get_mem_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 		ret = ep->ops->get_mem_resources(pdev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	if (ep->ops && ep->ops->get_clk_resources &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 			ep->ops->init_clk_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 		ret = ep->ops->get_clk_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		ret = ep->ops->init_clk_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	platform_set_drvdata(pdev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	ret = exynos_add_pcie_port(ep, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 		goto fail_probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) fail_probe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	phy_exit(ep->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	if (ep->ops && ep->ops->deinit_clk_resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		ep->ops->deinit_clk_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static int __exit exynos_pcie_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	struct exynos_pcie *ep = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	if (ep->ops && ep->ops->deinit_clk_resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 		ep->ops->deinit_clk_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) static const struct of_device_id exynos_pcie_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 		.compatible = "samsung,exynos5440-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		.data = &exynos5440_pcie_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static struct platform_driver exynos_pcie_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	.remove		= __exit_p(exynos_pcie_remove),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		.name	= "exynos-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		.of_match_table = exynos_pcie_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) /* Exynos PCIe driver does not allow module unload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) static int __init exynos_pcie_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) subsys_initcall(exynos_pcie_init);