Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards. The file reproduced below is this tree's copy of the Tegra PCIe host controller driver (drivers/pci/controller/pci-tegra.c).

// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

#define INT_PCI_MSI_NR (8 * 32)
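/* i.e. eight 32-bit AFI_MSI_VEC registers (defined below), 8 * 32 = 256 vectors in total */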

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
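/*
 * Worked example (illustrative, not from the TRM): with hypothetical field
 * values term = 0x19, e_term = 1, predi = 0xc and drvi = 0xf, the 16-bit
 * entry for one port would be composed as
 *
 *   (0x19 << PADS_REFCLK_CFG_TERM_SHIFT)  |
 *   (0x1  << PADS_REFCLK_CFG_E_TERM_SHIFT) |
 *   (0xc  << PADS_REFCLK_CFG_PREDI_SHIFT)  |
 *   (0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)
 *
 * and, given the per-port 16-bit layout described above, presumably lands in
 * the low or high halfword of PADS_REFCLK_CFG0 (ports 0/1) or PADS_REFCLK_CFG1.
 */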

#define PME_ACK_TIMEOUT 10000 /* in usec */
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated vectors */
	struct irq_domain *domain;
	struct mutex lock;			/* serializes vector allocation */
	void *virt;				/* CPU address of the MSI target page */
	dma_addr_t phys;			/* DMA address of the MSI target page */
	int irq;				/* parent interrupt for all vectors */
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

struct tegra_pcie_bus {
	struct list_head list;
	unsigned int nr;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint config space mapping, so that
 * the address whose access generates the correct config transaction falls
 * within this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
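/*
 * Worked example (illustrative): for bus 1, device 0, function 0 and the
 * extended config register at 0x104, the encoding above yields
 *
 *   ((0x104 & 0xf00) << 16) = 0x01000000   extended register [27:24]
 *   (1 << 16)               = 0x00010000   bus number        [23:16]
 *   (0x104 & 0xff)          = 0x00000004   register number   [ 7: 0]
 *
 * for a combined offset of 0x01010004.
 */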

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}
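/*
 * Continuing the worked example above (illustrative): for offset 0x01010004
 * the window base becomes 0xfe100000 + (0x01010000 >> 8) = 0xfe110100, and
 * the access is then issued at pcie->cfg + 0x004, i.e. the low 12 bits
 * select the register within the relocated 4 KiB page.
 */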
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
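/*
 * Accesses to bus 0 target the root ports' own registers through the
 * per-port apertures mapped above; the read-modify-write 32-bit helpers are
 * used there, presumably because those registers only tolerate word-sized
 * accesses. Downstream buses go through the remapped 4 KiB window and use
 * the plain generic accessors.
 */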
static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}
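/*
 * Note that AFI_PEX_CTRL_RST is effectively active-low here: clearing the
 * bit asserts the port reset (see also the "assert port reset" step in
 * tegra_pcie_port_disable() below). For the GPIO variant,
 * gpiod_set_value(..., 1) asserts the reset, with the electrical polarity
 * handled by the GPIO descriptor itself.
 */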

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages which results in receiver errors
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes a link speed change from Gen2 to Gen1 fails due to
	 * instability in the deskew logic on lane 0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds on Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain it to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}
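/*
 * The Gen-2 retrain itself happens later in the driver, outside this
 * excerpt. A minimal sketch of such a step, assuming the standard
 * PCI_EXP_LNKCTL2/LNKCTL field layout applies to these combined root-port
 * registers (illustrative only, not part of this file):
 */
static void __maybe_unused tegra_pcie_retrain_gen2_sketch(struct tegra_pcie_port *port)
{
	u32 value;

	/* advertise Gen-2 again as the target link speed */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKCTL2_TLS;
	value |= PCI_EXP_LNKCTL2_TLS_5_0GT;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);

	/* request link retraining; the status field lives in the upper halfword */
	value = readl(port->base + RP_LINK_CONTROL_STATUS);
	value |= PCI_EXP_LNKCTL_RL;
	writel(value, port->base + RP_LINK_CONTROL_STATUS);
}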

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* The Tegra PCIe root complex wrongly reports its device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
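/*
 * Editorial note (assumption, not stated in this file): 0x0bf0/0x0bf1 appear
 * to be the Tegra20 root-port device IDs and 0x0e1c/0x0e1d the Tegra30 ones.
 */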
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) /* Tegra20 and Tegra30 PCIE require relaxed ordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) static void tegra_pcie_relax_enable(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	struct tegra_pcie *pcie = pdev->bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	tegra_cpuidle_pcie_irqs_in_use();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	if (!irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		irq = pcie->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
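/*
 * Top-level AFI interrupt handler: decode the error code latched in
 * AFI_INTR_CODE and log it together with the raw signature; for aborts
 * and FPCI decode errors the faulting FPCI address is reconstructed and
 * printed as well. Legacy INTx codes return IRQ_NONE, leaving them to
 * the other handlers on this shared interrupt line.
 */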
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) static irqreturn_t tegra_pcie_isr(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	static const char * const err_msg[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		"Unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		"AXI slave error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		"AXI decode error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		"Target abort",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		"Master abort",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		"Invalid write",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		"Legacy interrupt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		"Response decoding error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		"AXI response decoding error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		"Transaction timeout",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		"Slot present pin change",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		"Slot clock request change",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		"TMS clock ramp change",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		"TMS ready for power down",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		"Peer2Peer error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct tegra_pcie *pcie = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	u32 code, signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	afi_writel(pcie, 0, AFI_INTR_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (code == AFI_INTR_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (code >= ARRAY_SIZE(err_msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 * do not pollute kernel log with master abort reports since they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	 * happen a lot during enumeration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	    code == AFI_INTR_FPCI_DECODE_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		if (code == AFI_INTR_MASTER_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			dev_dbg(dev, "  FPCI address: %10llx\n", address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			dev_err(dev, "  FPCI address: %10llx\n", address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * FPCI map is as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * - 0xfdfc000000: I/O space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * - 0xfdfe000000: type 0 configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * - 0xfdff000000: type 1 configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * - 0xfe00000000: type 0 extended configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  * - 0xfe10000000: type 1 extended configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  */
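/*
 * Illustrative example (hypothetical window, not from any device tree):
 * a 32 MiB non-prefetchable memory window at AXI address 0x13000000
 * would be programmed below as
 *
 *   AFI_AXI_BAR3_START = 0x13000000
 *   AFI_AXI_BAR3_SZ    = 0x02000000 >> 12 = 0x2000      (4 KiB units)
 *   AFI_FPCI_BAR3      = (((0x13000000 >> 12) & 0x0fffffff) << 4) | 0x1
 *                      = 0x00130001
 */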
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	struct resource_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	/* Bar 0: type 1 extended configuration space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	size = resource_size(&pcie->cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	resource_list_for_each_entry(entry, &bridge->windows) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		u32 fpci_bar, axi_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		struct resource *res = entry->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		switch (resource_type(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		case IORESOURCE_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			/* Bar 1: downstream IO bar */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			fpci_bar = 0xfdfc0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 			axi_address = pci_pio_to_address(res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		case IORESOURCE_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			axi_address = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			if (res->flags & IORESOURCE_PREFETCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 				/* Bar 2: prefetchable memory BAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				/* Bar 3: non prefetchable memory BAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	/* NULL out the remaining BARs as they are not used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	afi_writel(pcie, 0, AFI_FPCI_BAR4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	afi_writel(pcie, 0, AFI_FPCI_BAR5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (pcie->soc->has_cache_bars) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		/* map all upstream transactions as uncached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	/* MSI translations are set up only when needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
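/*
 * Busy-poll the lock-detect bit in the PADS PLL control register.
 * @timeout is given in milliseconds; returns 0 once PADS_PLL_CTL_LOCKDET
 * is set and -ETIMEDOUT otherwise.
 */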
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	timeout = jiffies + msecs_to_jiffies(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	while (time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		value = pads_readl(pcie, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (value & PADS_PLL_CTL_LOCKDET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
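/*
 * Direct PADS-register PHY bring-up, used when no PHY is described in
 * the device tree (see tegra_pcie_phy_power_on()): select the internal
 * PHY, hold the lanes in IDDQ, configure the PLL reference and TX
 * clocks, pulse the PLL reset and wait up to 500 ms for lock, then
 * release IDDQ and enable the TX/RX data paths.
 */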
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	/* initialize internal PHY, enable up to 16 PCIE lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	pads_writel(pcie, 0x0, PADS_CTL_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	/* override IDDQ to 1 on all 4 lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	value = pads_readl(pcie, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	value |= PADS_CTL_IDDQ_1L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	pads_writel(pcie, value, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 * Set up PHY PLL inputs: select PLLE output as refclock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 * set TX ref sel to div10 (not div5).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	value = pads_readl(pcie, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	pads_writel(pcie, value, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	/* reset PLL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	value = pads_readl(pcie, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	value &= ~PADS_PLL_CTL_RST_B4SM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	pads_writel(pcie, value, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	usleep_range(20, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	/* take PLL out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	value = pads_readl(pcie, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	value |= PADS_PLL_CTL_RST_B4SM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	pads_writel(pcie, value, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	/* wait for the PLL to lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	err = tegra_pcie_pll_wait(pcie, 500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		dev_err(dev, "PLL failed to lock: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	/* turn off IDDQ override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	value = pads_readl(pcie, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	value &= ~PADS_CTL_IDDQ_1L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	pads_writel(pcie, value, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	/* enable TX/RX data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	value = pads_readl(pcie, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	pads_writel(pcie, value, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	/* disable TX/RX data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	value = pads_readl(pcie, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	pads_writel(pcie, value, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	/* override IDDQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	value = pads_readl(pcie, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	value |= PADS_CTL_IDDQ_1L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	pads_writel(pcie, value, PADS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/* reset PLL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	value = pads_readl(pcie, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	value &= ~PADS_PLL_CTL_RST_B4SM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	pads_writel(pcie, value, soc->pads_pll_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	usleep_range(20, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct device *dev = port->pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	for (i = 0; i < port->lanes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		err = phy_power_on(port->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	struct device *dev = port->pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	for (i = 0; i < port->lanes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		err = phy_power_off(port->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
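/*
 * Two PHY models are supported: a single legacy PHY (either a real PHY
 * device or the direct PADS programming above) or, on newer SoCs, one
 * PHY per lane on every port. Power on whichever model applies.
 */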
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (pcie->legacy_phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		if (pcie->phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			err = phy_power_on(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			err = tegra_pcie_phy_enable(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			dev_err(dev, "failed to power on PHY: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	list_for_each_entry(port, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		err = tegra_pcie_port_phy_power_on(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				"failed to power on PCIe port %u PHY: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				port->index, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (pcie->legacy_phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		if (pcie->phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			err = phy_power_off(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			err = tegra_pcie_phy_disable(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			dev_err(dev, "failed to power off PHY: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	list_for_each_entry(port, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		err = tegra_pcie_port_phy_power_off(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				"failed to power off PCIe port %u PHY: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				port->index, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
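/*
 * One-time AFI setup: hand PLLE power-down control to the PADS block
 * when a PHY is present, select the lane crossbar configuration, enable
 * only the ports on the pcie->ports list, program the Gen2 fuse, enable
 * FPCI accesses and unmask the error interrupts that tegra_pcie_isr()
 * handles. MSI delivery stays masked until it is actually needed.
 */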
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	unsigned long value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	/* enable PLL power down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (pcie->phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		value = afi_readl(pcie, AFI_PLLE_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		afi_writel(pcie, value, AFI_PLLE_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	/* power down PCIe slot clock bias pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (soc->has_pex_bias_ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	/* configure mode and disable all ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	value = afi_readl(pcie, AFI_PCIE_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	list_for_each_entry(port, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	afi_writel(pcie, value, AFI_PCIE_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (soc->has_gen2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		value = afi_readl(pcie, AFI_FUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		afi_writel(pcie, value, AFI_FUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		value = afi_readl(pcie, AFI_FUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		afi_writel(pcie, value, AFI_FUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	/* Disable AFI dynamic clock gating and enable PCIe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	value = afi_readl(pcie, AFI_CONFIGURATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	value |= AFI_CONFIGURATION_EN_FPCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	afi_writel(pcie, value, AFI_CONFIGURATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (soc->has_intr_prsnt_sense)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		value |= AFI_INTR_EN_PRSNT_SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	/* don't enable MSI for now, only when needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	/* disable all exceptions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static void tegra_pcie_power_off(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	reset_control_assert(pcie->afi_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	clk_disable_unprepare(pcie->pll_e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (soc->has_cml_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		clk_disable_unprepare(pcie->cml_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	clk_disable_unprepare(pcie->afi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if (!dev->pm_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		dev_warn(dev, "failed to disable regulators: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
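/*
 * Power-up sequence: with all three resets asserted, enable the supply
 * regulators, ungate and unclamp the PCIE power partition (only when no
 * PM domain drives it), enable the AFI/CML/PLLE clocks and finally
 * release the AFI reset. The PEX and PCIe-X resets stay asserted here
 * and are released later in the bring-up sequence.
 */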
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int tegra_pcie_power_on(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	reset_control_assert(pcie->pcie_xrst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	reset_control_assert(pcie->afi_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	reset_control_assert(pcie->pex_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (!dev->pm_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	/* enable regulators */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		dev_err(dev, "failed to enable regulators: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	if (!dev->pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			dev_err(dev, "failed to power ungate: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			goto regulator_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			dev_err(dev, "failed to remove clamp: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			goto powergate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	err = clk_prepare_enable(pcie->afi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		dev_err(dev, "failed to enable AFI clock: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		goto powergate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (soc->has_cml_clk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		err = clk_prepare_enable(pcie->cml_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			dev_err(dev, "failed to enable CML clock: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			goto disable_afi_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	err = clk_prepare_enable(pcie->pll_e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		goto disable_cml_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	reset_control_deassert(pcie->afi_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) disable_cml_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	if (soc->has_cml_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		clk_disable_unprepare(pcie->cml_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) disable_afi_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	clk_disable_unprepare(pcie->afi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) powergate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (!dev->pm_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) regulator_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	/* Configure the reference clock driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (soc->num_ports > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	pcie->pex_clk = devm_clk_get(dev, "pex");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (IS_ERR(pcie->pex_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		return PTR_ERR(pcie->pex_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	pcie->afi_clk = devm_clk_get(dev, "afi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (IS_ERR(pcie->afi_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		return PTR_ERR(pcie->afi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	pcie->pll_e = devm_clk_get(dev, "pll_e");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (IS_ERR(pcie->pll_e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		return PTR_ERR(pcie->pll_e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	if (soc->has_cml_clk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		pcie->cml_clk = devm_clk_get(dev, "cml");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		if (IS_ERR(pcie->cml_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			return PTR_ERR(pcie->cml_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	if (IS_ERR(pcie->pex_rst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		return PTR_ERR(pcie->pex_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (IS_ERR(pcie->afi_rst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		return PTR_ERR(pcie->afi_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (IS_ERR(pcie->pcie_xrst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		return PTR_ERR(pcie->pcie_xrst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	pcie->phy = devm_phy_optional_get(dev, "pcie");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (IS_ERR(pcie->phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		err = PTR_ERR(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		dev_err(dev, "failed to get PHY: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	err = phy_init(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		dev_err(dev, "failed to initialize PHY: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	pcie->legacy_phy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
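/*
 * Look up an indexed, optional PHY by constructing its "<consumer>-<index>"
 * name (e.g. "pcie-0"). A missing PHY (-ENODEV) is mapped to NULL so that
 * callers can treat it as absent rather than as an error.
 */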
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static struct phy *devm_of_phy_optional_get_index(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 						  struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 						  const char *consumer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 						  unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	struct phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	phy = devm_of_phy_get(dev, np, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	if (PTR_ERR(phy) == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		phy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	return phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	struct device *dev = port->pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	struct phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (!port->phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	for (i = 0; i < port->lanes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		if (IS_ERR(phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				PTR_ERR(phy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			return PTR_ERR(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		err = phy_init(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		port->phys[i] = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
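/*
 * Choose the PHY model: SoCs without Gen2 support, and device trees that
 * still carry a single top-level "phys" property, take the legacy path;
 * otherwise per-lane PHYs are acquired for each port.
 */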
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct device_node *np = pcie->dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		return tegra_pcie_phys_get_legacy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	list_for_each_entry(port, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		err = tegra_pcie_port_get_phys(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if (pcie->legacy_phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		err = phy_exit(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 			dev_err(dev, "failed to teardown PHY: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	list_for_each_entry(port, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		for (i = 0; i < port->lanes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 			err = phy_exit(port->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 				dev_err(dev, "failed to teardown PHY#%u: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 					i, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	err = tegra_pcie_clocks_get(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		dev_err(dev, "failed to get clocks: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	err = tegra_pcie_resets_get(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		dev_err(dev, "failed to get resets: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (soc->program_uphy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		err = tegra_pcie_phys_get(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			dev_err(dev, "failed to get PHYs: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (IS_ERR(pcie->pads)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		err = PTR_ERR(pcie->pads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		goto phys_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (IS_ERR(pcie->afi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		err = PTR_ERR(pcie->afi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		goto phys_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	/* request configuration space, but remap later, on demand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		err = -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		goto phys_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	pcie->cs = *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	/* constrain configuration space to 4 KiB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	pcie->cs.end = pcie->cs.start + SZ_4K - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	if (IS_ERR(pcie->cfg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		err = PTR_ERR(pcie->cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		goto phys_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	/* request interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	err = platform_get_irq_byname(pdev, "intr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		goto phys_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	pcie->irq = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		dev_err(dev, "failed to register IRQ: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		goto phys_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) phys_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (soc->program_uphy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		tegra_pcie_phys_put(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	if (pcie->irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		free_irq(pcie->irq, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	if (soc->program_uphy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		tegra_pcie_phys_put(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
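/*
 * PME turn-off handshake: raise the port's turnoff bit in AFI_PCIE_PME,
 * poll for the matching ack bit for up to PME_ACK_TIMEOUT, then wait
 * roughly 10 ms (presumably for the link to settle in L2) before
 * clearing the turnoff bit again.
 */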
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	struct tegra_pcie *pcie = port->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	u8 ack_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	val = afi_readl(pcie, AFI_PCIE_PME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	afi_writel(pcie, val, AFI_PCIE_PME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	ack_bit = soc->ports[port->index].pme.ack_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	usleep_range(10000, 11000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	val = afi_readl(pcie, AFI_PCIE_PME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	afi_writel(pcie, val, AFI_PCIE_PME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
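/*
 * MSI vectors are tracked in a plain bitmap of INT_PCI_MSI_NR (8 x 32)
 * entries. tegra_msi_alloc() and tegra_msi_free() only manipulate that
 * bitmap under chip->lock; the hardware is not touched here.
 */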
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static int tegra_msi_alloc(struct tegra_msi *chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	int msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	mutex_lock(&chip->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (msi < INT_PCI_MSI_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		set_bit(msi, chip->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		msi = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	mutex_unlock(&chip->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	return msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	struct device *dev = chip->chip.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	mutex_lock(&chip->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	if (!test_bit(irq, chip->used))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		clear_bit(irq, chip->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	mutex_unlock(&chip->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
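/*
 * Each of the eight AFI_MSI_VEC registers latches 32 pending vectors,
 * so the hardware vector number is i * 32 + offset. Writing the bit
 * back evidently acts as write-one-to-clear, and the register is
 * re-read afterwards to catch vectors raised while one was handled.
 */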
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	struct tegra_pcie *pcie = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	struct tegra_msi *msi = &pcie->msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	unsigned int i, processed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		while (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			unsigned int offset = find_first_bit(&reg, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			unsigned int index = i * 32 + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 			unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 			/* clear the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 			irq = irq_find_mapping(msi->domain, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			if (irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 				if (test_bit(index, msi->used))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 					generic_handle_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 					dev_info(dev, "unhandled MSI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 				 * That's weird, who triggered this?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 				 * It was already cleared above, so just log it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 				dev_info(dev, "unexpected MSI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 			/* see if there's any more pending in this vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 			processed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
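/*
 * Legacy msi_controller .setup_irq callback: allocate a vector from
 * the bitmap, map it in the IRQ domain and compose the MSI message.
 * The target address is the DMA page reserved in
 * tegra_pcie_msi_setup() and the payload is the raw vector number,
 * which is what ends up in the AFI_MSI_VEC bits serviced above.
 */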
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static int tegra_msi_setup_irq(struct msi_controller *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 			       struct pci_dev *pdev, struct msi_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	struct tegra_msi *msi = to_tegra_msi(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct msi_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	int hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	hwirq = tegra_msi_alloc(msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (hwirq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	irq = irq_create_mapping(msi->domain, hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		tegra_msi_free(msi, hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	irq_set_msi_desc(irq, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	msg.address_lo = lower_32_bits(msi->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	msg.address_hi = upper_32_bits(msi->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	msg.data = hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	pci_write_msi_msg(irq, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) static void tegra_msi_teardown_irq(struct msi_controller *chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 				   unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	struct tegra_msi *msi = to_tegra_msi(chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	struct irq_data *d = irq_get_irq_data(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	irq_dispose_mapping(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	tegra_msi_free(msi, hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static struct irq_chip tegra_msi_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	.name = "Tegra PCIe MSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	.irq_enable = pci_msi_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	.irq_disable = pci_msi_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	.irq_mask = pci_msi_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	.irq_unmask = pci_msi_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			 irq_hw_number_t hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	irq_set_chip_data(irq, domain->host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	tegra_cpuidle_pcie_irqs_in_use();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static const struct irq_domain_ops msi_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	.map = tegra_msi_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	struct platform_device *pdev = to_platform_device(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	struct tegra_msi *msi = &pcie->msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	mutex_init(&msi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	msi->chip.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	msi->chip.setup_irq = tegra_msi_setup_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	msi->chip.teardown_irq = tegra_msi_teardown_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 					    &msi_domain_ops, &msi->chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (!msi->domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		dev_err(dev, "failed to create IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
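	/*
	 * The MSI interrupt is looked up by name, so the controller node
	 * is expected to list it in interrupt-names. Illustrative DT
	 * fragment (values elided, not taken from any particular board):
	 *
	 *	interrupts = <...>, <...>;
	 *	interrupt-names = "intr", "msi";
	 */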
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	err = platform_get_irq_byname(pdev, "msi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		goto free_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	msi->irq = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 			  tegra_msi_irq_chip.name, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		dev_err(dev, "failed to request IRQ: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		goto free_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	/* Though the PCIe controller can address a >32-bit address space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	 * set the coherent DMA mask to 32 bits so that the MSI target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	 * address is always 32-bit, to accommodate endpoints that support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	 * only 32-bit MSI target addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		goto free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 				    DMA_ATTR_NO_KERNEL_MAPPING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	if (!msi->virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		dev_err(dev, "failed to allocate DMA memory for MSI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		goto free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	host->msi = &msi->chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	free_irq(msi->irq, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) free_irq_domain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	irq_domain_remove(msi->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
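/*
 * In outline: point the 4 KiB MSI translation BAR at the DMA page
 * allocated during setup (the FPCI address shifted by the SoC-specific
 * amount, the AXI address verbatim), enable all 256 vectors, and
 * finally unmask the summary MSI bit so tegra_pcie_msi_irq() can run.
 */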
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	struct tegra_msi *msi = &pcie->msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	/* this register is in 4K increments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	/* enable all MSI vectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	/* and unmask the MSI interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	reg = afi_readl(pcie, AFI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	reg |= AFI_INTR_MASK_MSI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	afi_writel(pcie, reg, AFI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
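/*
 * Teardown mirrors tegra_pcie_msi_setup() in reverse: release the MSI
 * target page, free the controller interrupt, dispose of any remaining
 * domain mappings and only then remove the IRQ domain itself.
 */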
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	struct tegra_msi *msi = &pcie->msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	unsigned int i, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		       DMA_ATTR_NO_KERNEL_MAPPING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	if (msi->irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		free_irq(msi->irq, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		irq = irq_find_mapping(msi->domain, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		if (irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			irq_dispose_mapping(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	irq_domain_remove(msi->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	/* mask the MSI interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	value = afi_readl(pcie, AFI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	value &= ~AFI_INTR_MASK_MSI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	afi_writel(pcie, value, AFI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	/* disable all MSI vectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	value = afi_readl(pcie, AFI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	value &= ~AFI_INTR_MASK_INT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	afi_writel(pcie, value, AFI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
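/*
 * The lanes argument packs one root port per byte, port N occupying
 * bits [8N+7:8N] (see tegra_pcie_parse_dt() below). For example,
 * 0x010102 describes port 0 with two lanes and ports 1 and 2 with one
 * lane each, i.e. the 2x1, 1x1, 1x1 crossbar setting.
 */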
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 				      u32 *xbar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		switch (lanes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		case 0x010004:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 			dev_info(dev, "4x1, 1x1 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		case 0x010102:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		case 0x010101:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			dev_info(dev, "invalid lane configuration in DT, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 				 "falling back to default 2x1, 1x1, 1x1 "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 				 "configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		switch (lanes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		case 0x0000104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			dev_info(dev, "4x1, 1x1 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		case 0x0000102:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			dev_info(dev, "2x1, 1x1 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		switch (lanes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		case 0x00000204:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 			dev_info(dev, "4x1, 2x1 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		case 0x00020202:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			dev_info(dev, "2x3 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		case 0x00010104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			dev_info(dev, "4x1, 1x2 configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		switch (lanes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		case 0x00000004:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			dev_info(dev, "single-mode configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		case 0x00000202:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 			dev_info(dev, "dual-mode configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)  * Check whether a given set of supplies is available in a device tree node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)  * This is used to check whether the new or the legacy device tree bindings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)  * should be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) static bool of_regulator_bulk_available(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 					struct regulator_bulk_data *supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 					unsigned int num_supplies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	char property[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	for (i = 0; i < num_supplies; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		snprintf(property, 32, "%s-supply", supplies[i].supply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		if (of_find_property(np, property, NULL) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  * Old versions of the device tree binding for this device used a set of power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  * supplies that didn't match the hardware inputs. This happened to work for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)  * number of cases but is not future-proof. However, to preserve backwards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)  * compatibility with old device trees, this function will try to use the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)  * set of supplies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		pcie->num_supplies = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		pcie->num_supplies = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (pcie->num_supplies == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 				      sizeof(*pcie->supplies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 				      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	if (!pcie->supplies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	pcie->supplies[0].supply = "pex-clk";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	pcie->supplies[1].supply = "vdd";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	if (pcie->num_supplies > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		pcie->supplies[2].supply = "avdd";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)  * Obtains the list of regulators required for a particular generation of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)  * IP block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)  * This would've been nice to do simply by providing static tables for use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)  * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)  * and either seems to be optional depending on which ports are being used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		pcie->num_supplies = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 					      sizeof(*pcie->supplies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		if (!pcie->supplies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		pcie->supplies[i++].supply = "dvdd-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		pcie->supplies[i++].supply = "hvdd-pex-pll";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		pcie->supplies[i++].supply = "hvdd-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		pcie->supplies[i++].supply = "vddio-pexctl-aud";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		pcie->num_supplies = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 					      sizeof(*pcie->supplies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		if (!pcie->supplies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		pcie->supplies[i++].supply = "hvddio-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		pcie->supplies[i++].supply = "dvddio-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		pcie->supplies[i++].supply = "vddio-pex-ctl";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		pcie->num_supplies = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 					      sizeof(*pcie->supplies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		if (!pcie->supplies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		pcie->supplies[i++].supply = "avddio-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		pcie->supplies[i++].supply = "dvddio-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		pcie->supplies[i++].supply = "hvdd-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		pcie->supplies[i++].supply = "vddio-pex-ctl";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		bool need_pexa = false, need_pexb = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		if (lane_mask & 0x0f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			need_pexa = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		if (lane_mask & 0x30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			need_pexb = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 					 (need_pexb ? 2 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 					      sizeof(*pcie->supplies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		if (!pcie->supplies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		pcie->supplies[i++].supply = "avdd-pex-pll";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		pcie->supplies[i++].supply = "hvdd-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		pcie->supplies[i++].supply = "vddio-pex-ctl";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		pcie->supplies[i++].supply = "avdd-plle";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		if (need_pexa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 			pcie->supplies[i++].supply = "avdd-pexa";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 			pcie->supplies[i++].supply = "vdd-pexa";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		if (need_pexb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 			pcie->supplies[i++].supply = "avdd-pexb";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			pcie->supplies[i++].supply = "vdd-pexb";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		pcie->num_supplies = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 					      sizeof(*pcie->supplies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		if (!pcie->supplies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		pcie->supplies[0].supply = "avdd-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		pcie->supplies[1].supply = "vdd-pex";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		pcie->supplies[2].supply = "avdd-pex-pll";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		pcie->supplies[3].supply = "avdd-plle";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		pcie->supplies[4].supply = "vddio-pex-clk";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 					pcie->num_supplies))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		return devm_regulator_bulk_get(dev, pcie->num_supplies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 					       pcie->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	 * If not all regulators are available for this new scheme, assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	 * that the device tree complies with an older version of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	 * tree binding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	dev_info(dev, "using legacy DT binding for power supplies\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	devm_kfree(dev, pcie->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	pcie->num_supplies = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	return tegra_pcie_get_legacy_regulators(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
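/*
 * Root ports are described as child nodes of the controller node. An
 * illustrative (not board-specific) child node covering the properties
 * parsed below, where reg encodes the devfn read by of_pci_get_devfn()
 * and reset-gpios is the optional PERST# line:
 *
 *	pci@1,0 {
 *		device_type = "pci";
 *		reg = <...>;
 *		nvidia,num-lanes = <4>;
 *		reset-gpios = <...>;
 *		status = "okay";
 *	};
 *
 * Besides per-port data, the loop accumulates the packed lane layout
 * for the crossbar and a bitmask of lanes used by enabled ports, which
 * tegra_pcie_get_regulators() uses to pick the Tegra30 PEXA/PEXB
 * supplies.
 */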
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	struct device_node *np = dev->of_node, *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	const struct tegra_pcie_soc *soc = pcie->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	u32 lanes = 0, mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	unsigned int lane = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	/* parse root ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	for_each_child_of_node(np, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		struct tegra_pcie_port *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		char *label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		err = of_pci_get_devfn(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 			dev_err(dev, "failed to parse address: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		index = PCI_SLOT(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		if (index < 1 || index > soc->num_ports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 			dev_err(dev, "invalid port number: %d\n", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 			dev_err(dev, "failed to parse # of lanes: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		if (value > 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 			dev_err(dev, "invalid # of lanes: %u\n", value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		lanes |= value << (index << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		if (!of_device_is_available(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 			lane += value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		mask |= ((1 << value) - 1) << lane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		lane += value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		if (!rp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		err = of_address_to_resource(port, 0, &rp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			dev_err(dev, "failed to parse address: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		INIT_LIST_HEAD(&rp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		rp->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		rp->lanes = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		rp->pcie = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		rp->np = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		if (IS_ERR(rp->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			err = PTR_ERR(rp->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		if (!label) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 			goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		 * This returns -ENOENT if the reset-gpios property is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		 * populated; in that case fall back to toggling the PERST#
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		 * SFIO line via the per-port AFI register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 							     "reset-gpios", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 							     GPIOD_OUT_LOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 							     label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		if (IS_ERR(rp->reset_gpio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 				rp->reset_gpio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 				dev_err(dev, "failed to get reset GPIO: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 					PTR_ERR(rp->reset_gpio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 				err = PTR_ERR(rp->reset_gpio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 				goto err_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		list_add_tail(&rp->list, &pcie->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		dev_err(dev, "invalid lane configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	err = tegra_pcie_get_regulators(pcie, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) err_node_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	of_node_put(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)  * FIXME: If no PCIe cards are attached, calling this function can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)  * noticeably increase boot time because of its long timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)  * loops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
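/*
 * Rough worst case: 200 polls x up to 2 ms x 3 retries is about 1.2 s
 * per polling stage, and there are two stages (DL up, then DL active),
 * so an empty slot can add a couple of seconds to boot.
 */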
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	struct device *dev = port->pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	unsigned int retries = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	unsigned long value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	/* override presence detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	value = readl(port->base + RP_PRIV_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	writel(value, port->base + RP_PRIV_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 			value = readl(port->base + RP_VEND_XP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			if (value & RP_VEND_XP_DL_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		} while (--timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		if (!timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			dev_dbg(dev, "link %u down, retrying\n", port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 			usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		} while (--timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		tegra_pcie_port_reset(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	} while (--retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
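/*
 * Gen2 speed change, in outline: program the 5.0 GT/s target speed
 * into the port's Link Control 2 register, wait for any in-progress
 * training to finish, set the Retrain Link bit and poll Link Training
 * clear again, each wait bounded by LINK_RETRAIN_TIMEOUT.
 */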
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	ktime_t deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	list_for_each_entry(port, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		 * is not supported by Tegra. tegra_pcie_change_link_speed()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		 * is called only for Tegra chips which support Gen2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		 * So there is no harm in not verifying the supported link speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		value &= ~PCI_EXP_LNKSTA_CLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		 * Poll until link comes back from recovery to avoid race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		 * condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		while (ktime_before(ktime_get(), deadline)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			usleep_range(2000, 3000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		if (value & PCI_EXP_LNKSTA_LT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 			dev_warn(dev, "PCIe port %u link is in recovery\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 				 port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		/* Retrain the link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		value = readl(port->base + RP_LINK_CONTROL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		value |= PCI_EXP_LNKCTL_RL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		writel(value, port->base + RP_LINK_CONTROL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		while (ktime_before(ktime_get(), deadline)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			usleep_range(2000, 3000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		if (value & PCI_EXP_LNKSTA_LT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			dev_err(dev, "failed to retrain link of port %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 				port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	struct tegra_pcie_port *port, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		dev_info(dev, "probing port %u, using %u lanes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 			 port->index, port->lanes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		tegra_pcie_port_enable(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	/* Start LTSSM from Tegra side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	reset_control_deassert(pcie->pcie_xrst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		if (tegra_pcie_port_check_link(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		dev_info(dev, "link %u down, ignoring\n", port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		tegra_pcie_port_disable(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		tegra_pcie_port_free(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	if (pcie->soc->has_gen2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		tegra_pcie_change_link_speed(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
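/* Stop the LTSSM before disabling the individual root ports. */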
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	struct tegra_pcie_port *port, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	reset_control_assert(pcie->pcie_xrst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		tegra_pcie_port_disable(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 
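/*
 * Per-port bit positions for the PME turn-off request and its
 * acknowledgement in the AFI PME register; these differ between SoC
 * generations (see tegra_pcie_pme_turnoff()).
 */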
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 
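/*
 * Each tegra_pcie_soc below describes one SoC generation: the feature
 * flags gate optional programming sequences in the common code paths,
 * while the register values supply per-generation defaults.
 */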
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static const struct tegra_pcie_soc tegra20_pcie = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	.num_ports = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	.ports = tegra20_pcie_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	.msi_base_shift = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	.pads_refclk_cfg0 = 0xfa5cfa5c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	.has_pex_clkreq_en = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	.has_pex_bias_ctrl = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	.has_intr_prsnt_sense = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	.has_cml_clk = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	.has_gen2 = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	.force_pca_enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	.program_uphy = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	.update_clamp_threshold = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	.program_deskew_time = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	.update_fc_timer = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	.has_cache_bars = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	.ectl.enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static const struct tegra_pcie_soc tegra30_pcie = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	.num_ports = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	.ports = tegra30_pcie_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	.msi_base_shift = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	.afi_pex2_ctrl = 0x128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	.pads_refclk_cfg0 = 0xfa5cfa5c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	.pads_refclk_cfg1 = 0xfa5cfa5c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	.has_pex_clkreq_en = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	.has_pex_bias_ctrl = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	.has_intr_prsnt_sense = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	.has_cml_clk = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	.has_gen2 = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	.force_pca_enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	.program_uphy = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	.update_clamp_threshold = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	.program_deskew_time = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	.update_fc_timer = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	.has_cache_bars = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	.ectl.enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) static const struct tegra_pcie_soc tegra124_pcie = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	.num_ports = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	.ports = tegra20_pcie_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	.msi_base_shift = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	.pads_refclk_cfg0 = 0x44ac44ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	.has_pex_clkreq_en = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	.has_pex_bias_ctrl = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	.has_intr_prsnt_sense = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	.has_cml_clk = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	.has_gen2 = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	.force_pca_enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	.program_uphy = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	.update_clamp_threshold = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	.program_deskew_time = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	.update_fc_timer = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	.has_cache_bars = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	.ectl.enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
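/*
 * Tegra210 additionally programs UPHY electrical control (ECTL) settings
 * and updates the flow-control timer, selected by the flags below.
 */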
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) static const struct tegra_pcie_soc tegra210_pcie = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	.num_ports = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	.ports = tegra20_pcie_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	.msi_base_shift = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	.pads_refclk_cfg0 = 0x90b890b8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	/* FC threshold is bit[25:18] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	.update_fc_threshold = 0x01800000,
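	/* i.e. an encoded threshold of 0x01800000 >> 18 == 0x60 */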
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	.has_pex_clkreq_en = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	.has_pex_bias_ctrl = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	.has_intr_prsnt_sense = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	.has_cml_clk = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	.has_gen2 = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	.force_pca_enable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	.program_uphy = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	.update_clamp_threshold = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	.program_deskew_time = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	.update_fc_timer = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	.has_cache_bars = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	.ectl = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		.regs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			.rp_ectl_2_r1 = 0x0000000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			.rp_ectl_4_r1 = 0x00000067,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 			.rp_ectl_5_r1 = 0x55010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 			.rp_ectl_6_r1 = 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 			.rp_ectl_2_r2 = 0x0000008f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 			.rp_ectl_4_r2 = 0x000000c7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			.rp_ectl_5_r2 = 0x55010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 			.rp_ectl_6_r2 = 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		.enable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) static const struct tegra_pcie_soc tegra186_pcie = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	.num_ports = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	.ports = tegra186_pcie_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	.msi_base_shift = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	.afi_pex2_ctrl = 0x19c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	.pads_refclk_cfg0 = 0x80b880b8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	.pads_refclk_cfg1 = 0x000480b8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	.has_pex_clkreq_en = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	.has_pex_bias_ctrl = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	.has_intr_prsnt_sense = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	.has_cml_clk = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	.has_gen2 = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	.force_pca_enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	.program_uphy = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	.update_clamp_threshold = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	.program_deskew_time = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	.update_fc_timer = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	.has_cache_bars = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	.ectl.enable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
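/* Each compatible maps to the SoC description used at probe time. */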
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) static const struct of_device_id tegra_pcie_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
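/*
 * debugfs "ports" seq_file: prints one line per root port showing whether
 * the data link is up (RP_VEND_XP_DL_UP) and active (DL_LINK_ACTIVE).
 */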
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	struct tegra_pcie *pcie = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	if (list_empty(&pcie->ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	seq_puts(s, "Index  Status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	return seq_list_start(&pcie->ports, *pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	struct tegra_pcie *pcie = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	return seq_list_next(v, &pcie->ports, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	bool up = false, active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	port = list_entry(v, struct tegra_pcie_port, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	value = readl(port->base + RP_VEND_XP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	if (value & RP_VEND_XP_DL_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		up = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	seq_printf(s, "%2u     ", port->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	if (up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		seq_puts(s, "up");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	if (active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 		if (up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 			seq_puts(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		seq_puts(s, "active");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	seq_putc(s, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) static const struct seq_operations tegra_pcie_ports_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	.start = tegra_pcie_ports_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	.next = tegra_pcie_ports_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	.stop = tegra_pcie_ports_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	.show = tegra_pcie_ports_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
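/* Generates tegra_pcie_ports_fops from the seq_operations above. */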
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	debugfs_remove_recursive(pcie->debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	pcie->debugfs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	pcie->debugfs = debugfs_create_dir("pcie", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	debugfs_create_file("ports", 0444, pcie->debugfs, pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 			    &tegra_pcie_ports_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
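/*
 * Probe order: allocate the host bridge, parse the DT description, claim
 * resources, set up MSI, then power up the controller via runtime PM
 * before registering the host.
 */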
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) static int tegra_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	struct pci_host_bridge *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	struct tegra_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	if (!host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	pcie = pci_host_bridge_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	host->sysdata = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	platform_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	pcie->soc = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	INIT_LIST_HEAD(&pcie->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	pcie->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	err = tegra_pcie_parse_dt(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	err = tegra_pcie_get_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		dev_err(dev, "failed to request resources: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	err = tegra_pcie_msi_setup(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		dev_err(dev, "failed to enable MSI support: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		goto put_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	pm_runtime_enable(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	err = pm_runtime_get_sync(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		goto pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	host->ops = &tegra_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	host->map_irq = tegra_pcie_map_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	err = pci_host_probe(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		dev_err(dev, "failed to register host: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		goto pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	if (IS_ENABLED(CONFIG_DEBUG_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		tegra_pcie_debugfs_init(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 
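	/* Error unwind: undo the setup steps above in reverse order. */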
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) pm_runtime_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	pm_runtime_put_sync(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	pm_runtime_disable(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	tegra_pcie_msi_teardown(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) put_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	tegra_pcie_put_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
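/*
 * Teardown mirrors probe: remove the root bus, drop the runtime PM
 * reference, tear down MSI and resources, then free the ports.
 */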
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static int tegra_pcie_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	struct tegra_pcie_port *port, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	if (IS_ENABLED(CONFIG_DEBUG_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		tegra_pcie_debugfs_exit(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	pci_stop_root_bus(host->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	pci_remove_root_bus(host->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	pm_runtime_put_sync(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	pm_runtime_disable(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	if (IS_ENABLED(CONFIG_PCI_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		tegra_pcie_msi_teardown(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	tegra_pcie_put_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		tegra_pcie_port_free(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
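/*
 * Suspend: send PME_Turn_Off on each port first so that downstream
 * devices can move their links into a low-power state before the
 * controller is reset and powered down.
 */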
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	struct tegra_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	list_for_each_entry(port, &pcie->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		tegra_pcie_pme_turnoff(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	tegra_pcie_disable_ports(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	tegra_pcie_disable_interrupts(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	if (pcie->soc->program_uphy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		err = tegra_pcie_phy_power_off(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 			dev_err(dev, "failed to power off PHY(s): %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	reset_control_assert(pcie->pex_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	clk_disable_unprepare(pcie->pex_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	if (IS_ENABLED(CONFIG_PCI_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		tegra_pcie_disable_msi(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	pinctrl_pm_select_idle_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	tegra_pcie_power_off(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 
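/*
 * Runtime/system resume performs the full controller bring-up; probe
 * also runs this path via pm_runtime_get_sync().
 */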
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	err = tegra_pcie_power_on(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		dev_err(dev, "failed to power on PCIe controller: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	err = pinctrl_pm_select_default_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		goto poweroff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	tegra_pcie_enable_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	tegra_pcie_setup_translations(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	if (IS_ENABLED(CONFIG_PCI_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		tegra_pcie_enable_msi(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	err = clk_prepare_enable(pcie->pex_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		dev_err(dev, "failed to enable PEX clock: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		goto pex_dpd_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	reset_control_deassert(pcie->pex_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	if (pcie->soc->program_uphy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		err = tegra_pcie_phy_power_on(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 			dev_err(dev, "failed to power on PHY(s): %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 			goto disable_pex_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	tegra_pcie_apply_pad_settings(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	tegra_pcie_enable_ports(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) disable_pex_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	reset_control_assert(pcie->pex_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	clk_disable_unprepare(pcie->pex_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) pex_dpd_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	pinctrl_pm_select_idle_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) poweroff:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	tegra_pcie_power_off(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
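/*
 * The same suspend/resume pair backs both runtime PM and the noirq phase
 * of system sleep.
 */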
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) static const struct dev_pm_ops tegra_pcie_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 				      tegra_pcie_pm_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) static struct platform_driver tegra_pcie_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		.name = "tegra-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		.of_match_table = tegra_pcie_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 		.suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		.pm = &tegra_pcie_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	.probe = tegra_pcie_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	.remove = tegra_pcie_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) module_platform_driver(tegra_pcie_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) MODULE_LICENSE("GPL");