Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

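The file below is the Marvell Armada-8K DesignWare PCIe glue driver, drivers/pci/controller/dwc/pcie-armada8k.c, as carried in this 5.10-based tree.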
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Marvell Armada-8K SoCs
 *
 * Armada-8K PCIe Glue Layer Source Code
 *
 * Copyright (C) 2016 Marvell Technology Group Ltd.
 *
 * Author: Yehuda Yitshak <yehuday@marvell.com>
 * Author: Shadi Ammouri <shadi@marvell.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>

#include "pcie-designware.h"

#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4

struct armada8k_pcie {
	struct dw_pcie *pci;
	struct clk *clk;
	struct clk *clk_reg;
	struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
	unsigned int phy_count;
};

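/*
 * Vendor-specific "glue" registers live at offset 0x8000 inside the DBI
 * window; they are accessed with the same dw_pcie_{readl,writel}_dbi()
 * helpers as the standard DesignWare registers.
 */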
#define PCIE_VENDOR_REGS_OFFSET		0x8000

#define PCIE_GLOBAL_CONTROL_REG		(PCIE_VENDOR_REGS_OFFSET + 0x0)
#define PCIE_APP_LTSSM_EN		BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT		4
#define PCIE_DEVICE_TYPE_MASK		0xF
#define PCIE_DEVICE_TYPE_RC		0x4 /* Root complex */

#define PCIE_GLOBAL_STATUS_REG		(PCIE_VENDOR_REGS_OFFSET + 0x8)
#define PCIE_GLB_STS_RDLH_LINK_UP	BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP	BIT(9)

#define PCIE_GLOBAL_INT_CAUSE1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x1C)
#define PCIE_GLOBAL_INT_MASK1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x20)
#define PCIE_INT_A_ASSERT_MASK		BIT(9)
#define PCIE_INT_B_ASSERT_MASK		BIT(10)
#define PCIE_INT_C_ASSERT_MASK		BIT(11)
#define PCIE_INT_D_ASSERT_MASK		BIT(12)

#define PCIE_ARCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x50)
#define PCIE_AWCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x54)
#define PCIE_ARUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
 * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
 * allocate
 */
#define ARCACHE_DEFAULT_VALUE		0x3511
#define AWCACHE_DEFAULT_VALUE		0x5311

#define DOMAIN_OUTER_SHAREABLE		0x2
#define AX_USER_DOMAIN_MASK		0x3
#define AX_USER_DOMAIN_SHIFT		4

#define to_armada8k_pcie(x)	dev_get_drvdata((x)->dev)

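/*
 * The generic PHY helpers treat a NULL struct phy * as a no-op, so the
 * loops below may walk all ARMADA8K_PCIE_MAX_LANES slots even when some
 * lanes have no PHY handle (those slots are left NULL by
 * armada8k_pcie_setup_phys()).
 */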
static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
{
	int i;

	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}
}

static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
{
	int ret;
	int i;

	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
		ret = phy_init(pcie->phy[i]);
		if (ret)
			return ret;

		ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
				       pcie->phy_count);
		if (ret) {
			phy_exit(pcie->phy[i]);
			return ret;
		}

		ret = phy_power_on(pcie->phy[i]);
		if (ret) {
			phy_exit(pcie->phy[i]);
			return ret;
		}
	}

	return 0;
}

static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	int ret = 0;
	int i;

	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
		pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
		if (IS_ERR(pcie->phy[i])) {
			if (PTR_ERR(pcie->phy[i]) != -ENODEV)
				return PTR_ERR(pcie->phy[i]);

			pcie->phy[i] = NULL;
			continue;
		}

		pcie->phy_count++;
	}

	/* Old bindings miss the PHY handle, so just warn if there is no PHY */
	if (!pcie->phy_count)
		dev_warn(dev, "No available PHY\n");

	ret = armada8k_pcie_enable_phys(pcie);
	if (ret)
		dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);

	return ret;
}

static int armada8k_pcie_link_up(struct dw_pcie *pci)
{
	u32 reg;
	u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;

	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);

	if ((reg & mask) == mask)
		return 1;

	dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
	return 0;
}

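/*
 * Bring-up sequence: hold LTSSM off while the port is configured as a
 * root complex with write-back, outer-shareable AXI master attributes
 * and INTA-INTD enabled, then release LTSSM and wait for link training
 * to complete.
 */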
static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 reg;

	if (!dw_pcie_link_up(pci)) {
		/* Disable LTSSM state machine to enable configuration */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
		reg &= ~(PCIE_APP_LTSSM_EN);
		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
	}

	/* Set the device to root complex mode */
	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
	reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
	reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);

	/* Set the PCIe master AxCache attributes */
	dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
	dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);

	/* Set the PCIe master AxDomain attributes */
	reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);

	reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);

	/* Enable INT A-D interrupts */
	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
	reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);

	if (!dw_pcie_link_up(pci)) {
		/* Configuration done. Start LTSSM */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
		reg |= PCIE_APP_LTSSM_EN;
		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
	}

	/* Wait until the link becomes active again */
	if (dw_pcie_wait_for_link(pci))
		dev_err(pci->dev, "Link not up after reconfiguration\n");
}

static int armada8k_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);

	dw_pcie_setup_rc(pp);
	armada8k_pcie_establish_link(pcie);

	return 0;
}

static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
	struct armada8k_pcie *pcie = arg;
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	/*
	 * Interrupts are directly handled by the device driver of the
	 * PCI device. However, they are also latched into the PCIe
	 * controller, so we simply discard them.
	 */
	val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);

	return IRQ_HANDLED;
}

static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
	.host_init = armada8k_pcie_host_init,
};

static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
				  struct platform_device *pdev)
{
	struct dw_pcie *pci = pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	pp->ops = &armada8k_pcie_host_ops;

	pp->irq = platform_get_irq(pdev, 0);
	if (pp->irq < 0)
		return pp->irq;

	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
			       IRQF_SHARED, "armada8k-pcie", pcie);
	if (ret) {
		dev_err(dev, "failed to request irq %d\n", pp->irq);
		return ret;
	}

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host: %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = armada8k_pcie_link_up,
};

static int armada8k_pcie_probe(struct platform_device *pdev)
{
	struct dw_pcie *pci;
	struct armada8k_pcie *pcie;
	struct device *dev = &pdev->dev;
	struct resource *base;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	pcie->pci = pci;

	pcie->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(pcie->clk))
		return PTR_ERR(pcie->clk);

	ret = clk_prepare_enable(pcie->clk);
	if (ret)
		return ret;

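	/*
	 * The "reg" interface clock is optional: only -EPROBE_DEFER is
	 * propagated; any other error simply means the clock is absent
	 * and clk_reg stays unused.
	 */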
	pcie->clk_reg = devm_clk_get(dev, "reg");
	if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
		ret = -EPROBE_DEFER;
		goto fail;
	}
	if (!IS_ERR(pcie->clk_reg)) {
		ret = clk_prepare_enable(pcie->clk_reg);
		if (ret)
			goto fail_clkreg;
	}

	/* Get the dw-pcie unit configuration/control registers base. */
	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto fail_clkreg;
	}

	ret = armada8k_pcie_setup_phys(pcie);
	if (ret)
		goto fail_clkreg;

	platform_set_drvdata(pdev, pcie);

	ret = armada8k_add_pcie_port(pcie, pdev);
	if (ret)
		goto disable_phy;

	return 0;

disable_phy:
	armada8k_pcie_disable_phys(pcie);
fail_clkreg:
	clk_disable_unprepare(pcie->clk_reg);
fail:
	clk_disable_unprepare(pcie->clk);

	return ret;
}

static const struct of_device_id armada8k_pcie_of_match[] = {
	{ .compatible = "marvell,armada8k-pcie", },
	{},
};

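/*
 * Built-in only: there is no remove() callback, so unbinding via sysfs is
 * suppressed and the driver is registered with builtin_platform_driver().
 */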
static struct platform_driver armada8k_pcie_driver = {
	.probe		= armada8k_pcie_probe,
	.driver = {
		.name	= "armada8k-pcie",
		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(armada8k_pcie_driver);