Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /* Copyright (C) 2009 - 2019 Broadcom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/irqchip/chained_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/msi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/of_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/printk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/sizes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) /* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #define BRCM_PCIE_CAP_REGS				0x00ac
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) /* Broadcom STB PCIe Register Offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #define  PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN			0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY			0x04dc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK	0xc00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #define PCIE_RC_DL_MDIO_ADDR				0x1100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #define PCIE_RC_DL_MDIO_WR_DATA				0x1104
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #define PCIE_RC_DL_MDIO_RD_DATA				0x1108
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #define PCIE_MISC_MISC_CTRL				0x4008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #define  PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK		0x07c00000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #define  PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK		0x0000001f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #define  SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #define PCIE_MEM_WIN0_LO(win)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI		0x4010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #define PCIE_MEM_WIN0_HI(win)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) #define PCIE_MISC_RC_BAR1_CONFIG_LO			0x402c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) #define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK		0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #define PCIE_MISC_RC_BAR2_CONFIG_LO			0x4034
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) #define  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK		0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #define PCIE_MISC_RC_BAR2_CONFIG_HI			0x4038
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) #define PCIE_MISC_RC_BAR3_CONFIG_LO			0x403c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) #define  PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK		0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) #define PCIE_MISC_MSI_BAR_CONFIG_LO			0x4044
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) #define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) #define PCIE_MISC_MSI_DATA_CONFIG			0x404c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) #define  PCIE_MISC_MSI_DATA_CONFIG_VAL_32		0xffe06540
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) #define  PCIE_MISC_MSI_DATA_CONFIG_VAL_8		0xfff86540
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) #define PCIE_MISC_PCIE_CTRL				0x4064
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) #define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) #define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK		0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) #define PCIE_MISC_PCIE_STATUS				0x4068
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) #define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) #define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) #define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) #define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) #define PCIE_MISC_REVISION				0x406c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) #define  BRCM_PCIE_HW_REV_33				0x0303
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) #define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI			0x4080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK	0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) #define PCIE_MEM_WIN0_BASE_HI(win)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI			0x4084
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) #define PCIE_MEM_WIN0_LIMIT_HI(win)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) #define PCIE_MISC_HARD_PCIE_HARD_DEBUG					0x4204
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x08000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) #define PCIE_INTR2_CPU_BASE		0x4300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) #define PCIE_MSI_INTR2_BASE		0x4500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) /* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) #define  MSI_INT_STATUS			0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) #define  MSI_INT_CLR			0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) #define  MSI_INT_MASK_SET		0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) #define  MSI_INT_MASK_CLR		0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) #define PCIE_EXT_CFG_DATA				0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) #define PCIE_EXT_CFG_INDEX				0x9000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) #define  PCIE_EXT_BUSNUM_SHIFT				20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) #define  PCIE_EXT_SLOT_SHIFT				15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) #define  PCIE_EXT_FUNC_SHIFT				12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) #define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) #define  PCIE_RGR1_SW_INIT_1_PERST_SHIFT		0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) #define RGR1_SW_INIT_1_INIT_GENERIC_MASK		0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) #define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT		0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) #define RGR1_SW_INIT_1_INIT_7278_MASK			0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) #define RGR1_SW_INIT_1_INIT_7278_SHIFT			0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) /* PCIe parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) #define BRCM_NUM_PCIE_OUT_WINS		0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) #define BRCM_INT_PCI_MSI_NR		32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) #define BRCM_INT_PCI_MSI_LEGACY_NR	8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) #define BRCM_INT_PCI_MSI_SHIFT		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) /* MSI target addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) #define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) #define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) /* MDIO registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) #define MDIO_PORT0			0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) #define MDIO_DATA_MASK			0x7fffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) #define MDIO_PORT_MASK			0xf0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) #define MDIO_REGAD_MASK			0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) #define MDIO_CMD_MASK			0xfff00000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) #define MDIO_CMD_READ			0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) #define MDIO_CMD_WRITE			0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) #define MDIO_DATA_DONE_MASK		0x80000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) #define MDIO_RD_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) #define MDIO_WT_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) #define SSC_REGS_ADDR			0x1100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) #define SET_ADDR_OFFSET			0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) #define SSC_CNTL_OFFSET			0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) #define SSC_CNTL_OVRD_EN_MASK		0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) #define SSC_CNTL_OVRD_VAL_MASK		0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) #define SSC_STATUS_OFFSET		0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) #define SSC_STATUS_SSC_MASK		0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) #define SSC_STATUS_PLL_LOCK_MASK	0x800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) #define PCIE_BRCM_MAX_MEMC		3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) #define IDX_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_INDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) #define DATA_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_DATA])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) #define PCIE_RGR1_SW_INIT_1(pcie)	(pcie->reg_offsets[RGR1_SW_INIT_1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) /* Rescal registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) #define PCIE_DVT_PMU_PCIE_PHY_CTRL				0xc700
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS			0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK		0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT	0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK		0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT		0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK		0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) #define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT		0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) /* Forward declarations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) struct brcm_pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
/* Indices into the per-SoC register-offset table (pcie_cfg_data::offsets) */
enum {
	RGR1_SW_INIT_1,		/* bridge software-init / reset control register */
	EXT_CFG_INDEX,		/* extended config space: index register */
	EXT_CFG_DATA,		/* extended config space: data register */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
/* Indices describing the RGR1_SW_INIT_1 INIT field as a mask/shift pair */
enum {
	RGR1_SW_INIT_1_INIT_MASK,	/* bitmask of the INIT field */
	RGR1_SW_INIT_1_INIT_SHIFT,	/* bit position of the INIT field */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 
/*
 * Controller flavour; selects the register-offset table and the
 * chip-specific PERST#/bridge-reset callbacks (see *_cfg tables below).
 */
enum pcie_type {
	GENERIC,	/* default Broadcom STB register layout */
	BCM7278,
	BCM2711,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
/*
 * Per-SoC match data: register offset table, controller type, and the
 * chip-specific accessors for PERST# and the bridge software reset.
 */
struct pcie_cfg_data {
	const int *offsets;	/* indexed by RGR1_SW_INIT_1 / EXT_CFG_* above */
	const enum pcie_type type;
	void (*perst_set)(struct brcm_pcie *pcie, u32 val);
	void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
/* Register offsets for GENERIC (and BCM2711) controllers */
static const int pcie_offsets[] = {
	[RGR1_SW_INIT_1] = 0x9210,
	[EXT_CFG_INDEX]  = 0x9000,
	[EXT_CFG_DATA]   = 0x9004,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
/* Match data for the default Broadcom STB controller */
static const struct pcie_cfg_data generic_cfg = {
	.offsets	= pcie_offsets,
	.type		= GENERIC,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 
/* Register offsets for BCM7278; only RGR1_SW_INIT_1 differs from generic */
static const int pcie_offset_bcm7278[] = {
	[RGR1_SW_INIT_1] = 0xc010,
	[EXT_CFG_INDEX] = 0x9000,
	[EXT_CFG_DATA] = 0x9004,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
/* Match data for BCM7278: its own offsets and reset accessors */
static const struct pcie_cfg_data bcm7278_cfg = {
	.offsets	= pcie_offset_bcm7278,
	.type		= BCM7278,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
/* Match data for BCM2711: generic offsets/accessors, distinct type tag */
static const struct pcie_cfg_data bcm2711_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM2711,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 
/* MSI controller state for one PCIe host bridge */
struct brcm_msi {
	struct device		*dev;
	void __iomem		*base;		/* controller register base */
	struct device_node	*np;
	struct irq_domain	*msi_domain;	/* outer (PCI MSI) domain */
	struct irq_domain	*inner_domain;	/* hwirq allocation domain */
	struct mutex		lock; /* guards the alloc/free operations */
	u64			target_addr;	/* PCI address MSI writes are aimed at */
	int			irq;		/* parent interrupt for the chained handler */
	/* used indicates which MSI interrupts have been alloc'd */
	unsigned long		used;
	bool			legacy;		/* use shared-register MSI layout (see legacy_shift) */
	/* Some chips have MSIs in bits [31..24] of a shared register. */
	int			legacy_shift;
	int			nr; /* No. of MSI available, depends on chip */
	/* This is the base pointer for interrupt status/set/clr regs */
	void __iomem		*intr_base;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) /* Internal PCIe Host Controller Information.*/
/* Internal PCIe Host Controller Information.*/
struct brcm_pcie {
	struct device		*dev;
	void __iomem		*base;		/* mapped controller registers */
	struct clk		*clk;
	struct device_node	*np;
	bool			ssc;		/* spread-spectrum clocking requested */
	int			gen;		/* link-speed cap; 0 = no limit */
	u64			msi_target_addr;
	struct brcm_msi		*msi;
	const int		*reg_offsets;	/* per-SoC table, see pcie_cfg_data */
	enum pcie_type		type;
	struct reset_control	*rescal;
	int			num_memc;	/* number of memory controllers in use */
	u64			memc_size[PCIE_BRCM_MAX_MEMC];
	u32			hw_rev;		/* value of PCIE_MISC_REVISION */
	void			(*perst_set)(struct brcm_pcie *pcie, u32 val);
	void			(*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293)  * This is to convert the size of the inbound "BAR" region to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294)  * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) static int brcm_pcie_encode_ibar_size(u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	int log2_in = ilog2(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	if (log2_in >= 12 && log2_in <= 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		/* Covers 4KB to 32KB (inclusive) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		return (log2_in - 12) + 0x1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	else if (log2_in >= 16 && log2_in <= 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 		/* Covers 64KB to 32GB, (inclusive) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 		return log2_in - 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	/* Something is awry so disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	u32 pkt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	return pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) /* negative return value indicates error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	int tries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 		   base + PCIE_RC_DL_MDIO_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	readl(base + PCIE_RC_DL_MDIO_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 		data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	*val = FIELD_GET(MDIO_DATA_MASK, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	return MDIO_RD_DONE(data) ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) /* negative return value indicates error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 				u8 regad, u16 wrdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	int tries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		   base + PCIE_RC_DL_MDIO_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	readl(base + PCIE_RC_DL_MDIO_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	return MDIO_WT_DONE(data) ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  * return value indicates error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	int pll, ssc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 				   SSC_REGS_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 				  SSC_CNTL_OFFSET, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 				   SSC_CNTL_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 				  SSC_STATUS_OFFSET, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	return ssc && pll ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) /* Limits operation to a specific generation (1, 2, or 3) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	lnkctl2 = (lnkctl2 & ~0xf) | gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 				       unsigned int win, u64 cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 				       u64 pcie_addr, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	u32 cpu_addr_mb_high, limit_addr_mb_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	phys_addr_t cpu_addr_mb, limit_addr_mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	int high_addr_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	/* Set the base of the pcie_addr window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	/* Write the addr base & limit lower bits (in MBs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	cpu_addr_mb = cpu_addr / SZ_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	u32p_replace_bits(&tmp, cpu_addr_mb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	u32p_replace_bits(&tmp, limit_addr_mb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	/* Write the cpu & limit addr upper bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	high_addr_shift =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	u32p_replace_bits(&tmp, cpu_addr_mb_high,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	u32p_replace_bits(&tmp, limit_addr_mb_high,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) static struct irq_chip brcm_msi_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	.name            = "BRCM STB PCIe MSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	.irq_ack         = irq_chip_ack_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	.irq_mask        = pci_msi_mask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	.irq_unmask      = pci_msi_unmask_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) static struct msi_domain_info brcm_msi_domain_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	/* Multi MSI is supported by the controller, but not by this driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	.chip	= &brcm_msi_irq_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) static void brcm_pcie_msi_isr(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	struct irq_chip *chip = irq_desc_get_chip(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	unsigned long status, virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	struct brcm_msi *msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	u32 bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	chained_irq_enter(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	msi = irq_desc_get_handler_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	dev = msi->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	status = readl(msi->intr_base + MSI_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	status >>= msi->legacy_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	for_each_set_bit(bit, &status, msi->nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		virq = irq_find_mapping(msi->inner_domain, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		if (virq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 			generic_handle_irq(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 			dev_dbg(dev, "unexpected MSI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	chained_irq_exit(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	msg->address_lo = lower_32_bits(msi->target_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	msg->address_hi = upper_32_bits(msi->target_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) static int brcm_msi_set_affinity(struct irq_data *irq_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 				 const struct cpumask *mask, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) static void brcm_msi_ack_irq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	const int shift_amt = data->hwirq + msi->legacy_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static struct irq_chip brcm_msi_bottom_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	.name			= "BRCM STB MSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	.irq_set_affinity	= brcm_msi_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	.irq_ack                = brcm_msi_ack_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) static int brcm_msi_alloc(struct brcm_msi *msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	int hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	mutex_lock(&msi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	mutex_unlock(&msi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	return hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	mutex_lock(&msi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	bitmap_release_region(&msi->used, hwirq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	mutex_unlock(&msi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 				 unsigned int nr_irqs, void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	struct brcm_msi *msi = domain->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	int hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	hwirq = brcm_msi_alloc(msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	if (hwirq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		return hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 			    &brcm_msi_bottom_irq_chip, domain->host_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 			    handle_edge_irq, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static void brcm_irq_domain_free(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 				 unsigned int virq, unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	brcm_msi_free(msi, d->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) static const struct irq_domain_ops msi_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	.alloc	= brcm_irq_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	.free	= brcm_irq_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) static int brcm_allocate_domains(struct brcm_msi *msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	struct device *dev = msi->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	if (!msi->inner_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		dev_err(dev, "failed to create IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 						    &brcm_msi_domain_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 						    msi->inner_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	if (!msi->msi_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		dev_err(dev, "failed to create MSI domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		irq_domain_remove(msi->inner_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) static void brcm_free_domains(struct brcm_msi *msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	irq_domain_remove(msi->msi_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	irq_domain_remove(msi->inner_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) static void brcm_msi_remove(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	struct brcm_msi *msi = pcie->msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	if (!msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	irq_set_chained_handler(msi->irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	irq_set_handler_data(msi->irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	brcm_free_domains(msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) static void brcm_msi_set_regs(struct brcm_msi *msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	u32 val = __GENMASK(31, msi->legacy_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	writel(val, msi->intr_base + MSI_INT_MASK_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	writel(val, msi->intr_base + MSI_INT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	 * enable, which we set to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	writel(lower_32_bits(msi->target_addr) | 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	writel(upper_32_bits(msi->target_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	struct brcm_msi *msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	int irq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	irq = irq_of_parse_and_map(dev->of_node, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	if (irq <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		dev_err(dev, "cannot map MSI interrupt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	if (!msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	mutex_init(&msi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	msi->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	msi->base = pcie->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	msi->np = pcie->np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	msi->target_addr = pcie->msi_target_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	msi->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	if (msi->legacy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		msi->legacy_shift = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		msi->nr = BRCM_INT_PCI_MSI_NR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		msi->legacy_shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	ret = brcm_allocate_domains(msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	brcm_msi_set_regs(msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	pcie->msi = msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) /* The controller is capable of serving in both RC and EP roles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	void __iomem *base = pcie->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	return dla && plu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) /* Configuration space read/write support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) static inline int brcm_pcie_cfg_index(int busnr, int devfn, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	return ((PCI_SLOT(devfn) & 0x1f) << PCIE_EXT_SLOT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		| ((PCI_FUNC(devfn) & 0x07) << PCIE_EXT_FUNC_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		| (busnr << PCIE_EXT_BUSNUM_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		| (reg & ~3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 					int where)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	struct brcm_pcie *pcie = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	void __iomem *base = pcie->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	/* Accesses to the RC go right to the RC registers if slot==0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	if (pci_is_root_bus(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		return PCI_SLOT(devfn) ? NULL : base + where;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	/* For devices, write to the config space index register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	idx = brcm_pcie_cfg_index(bus->number, devfn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	return base + PCIE_EXT_CFG_DATA + where;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) static struct pci_ops brcm_pcie_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	.map_bus = brcm_pcie_map_conf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	.read = pci_generic_config_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	.write = pci_generic_config_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	u32 tmp, mask =  RGR1_SW_INIT_1_INIT_GENERIC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	tmp = (tmp & ~mask) | ((val << shift) & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	u32 tmp, mask =  RGR1_SW_INIT_1_INIT_7278_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	tmp = (tmp & ~mask) | ((val << shift) & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	/* Perst bit has moved and assert value is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	writel(tmp, pcie->base +  PCIE_MISC_PCIE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 							u64 *rc_bar2_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 							u64 *rc_bar2_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	struct resource_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	u64 lowest_pcie_addr = ~(u64)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	int ret, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	u64 size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		u64 pcie_beg = entry->res->start - entry->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		size += entry->res->end - entry->res->start + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		if (pcie_beg < lowest_pcie_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			lowest_pcie_addr = pcie_beg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (lowest_pcie_addr == ~(u64)0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		dev_err(dev, "DT node has no dma-ranges\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 						  PCIE_BRCM_MAX_MEMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		/* Make an educated guess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		pcie->num_memc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		pcie->memc_size[0] = 1ULL << fls64(size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		pcie->num_memc = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	/* Each memc is viewed through a "port" that is a power of 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	for (i = 0, size = 0; i < pcie->num_memc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		size += pcie->memc_size[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	/* System memory starts at this address in PCIe-space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	*rc_bar2_offset = lowest_pcie_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/* The sum of all memc views must also be a power of 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	*rc_bar2_size = 1ULL << fls64(size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	 * We validate the inbound memory view even though we should trust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	 * whatever the device-tree provides. This is because of an HW issue on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	 * early Raspberry Pi 4's revisions (bcm2711). It turns out its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	 * firmware has to dynamically edit dma-ranges due to a bug on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	 * PCIe controller integration, which prohibits any access above the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 * in check, avoiding hard to debug device-tree related issues in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 * future:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	 * The PCIe host controller by design must set the inbound viewport to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	 * be a contiguous arrangement of all of the system's memory.  In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	 * addition, its size mut be a power of two.  To further complicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	 * matters, the viewport must start on a pcie-address that is aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	 * on a multiple of its size.  If a portion of the viewport does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	 * represent system memory -- e.g. 3GB of memory requires a 4GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 * viewport -- we can map the outbound memory in or after 3GB and even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 * though the viewport will overlap the outbound memory the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 * will know to send outbound memory downstream and everything else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 * upstream.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 * For example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 * - The best-case scenario, memory up to 3GB, is to place the inbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 *   region in the first 4GB of pcie-space, as some legacy devices can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 *   only address 32bits. We would also like to put the MSI under 4GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	 *   as well, since some devices require a 32bit MSI target address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	 * - If the system memory is 4GB or larger we cannot start the inbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	 *   region at location 0 (since we have to allow some space for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	 *   outbound memory @ 3GB). So instead it will start at the 1x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	 *   multiple of its size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	    (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			*rc_bar2_size, *rc_bar2_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
/*
 * brcm_pcie_setup() - Bring the root complex up: reset the bridge, power
 * up the SerDes, program the inbound (RC_BAR2) and outbound windows, wait
 * for link-up, and finalize RC-side config-space settings.
 *
 * Return: 0 on success, -ENODEV if the link never comes up, -EINVAL if the
 * DT-provided dma-ranges are invalid, the controller is strapped for EP
 * mode, or there are more outbound windows than the HW supports.
 */
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u64 rc_bar2_offset, rc_bar2_size;
	void __iomem *base = pcie->base;
	struct device *dev = pcie->dev;
	struct resource_entry *entry;
	bool ssc_good = false;
	struct resource *res;
	int num_out_wins = 0;
	u16 nlw, cls, lnksta;
	int i, ret, memc;
	u32 tmp, burst, aspm_support;

	/* Reset the bridge */
	pcie->bridge_sw_init_set(pcie, 1);
	usleep_range(100, 200);

	/* Take the bridge out of reset */
	pcie->bridge_sw_init_set(pcie, 0);

	/* Power up the SerDes by clearing its IDDQ (power-down) bit */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	/* Wait for SerDes to be stable */
	usleep_range(100, 200);

	/*
	 * SCB_MAX_BURST_SIZE is a two bit field.  For GENERIC chips it
	 * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
	 * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
	 */
	if (pcie->type == BCM2711)
		burst = 0x0; /* 128B */
	else if (pcie->type == BCM7278)
		burst = 0x3; /* 512 bytes */
	else
		burst = 0x2; /* 512 bytes */

	/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
	u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/* Derive the inbound window geometry from the DT dma-ranges */
	ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
						    &rc_bar2_offset);
	if (ret)
		return ret;

	/* Program RC_BAR2 (inbound window): PCIe-side base + encoded size */
	tmp = lower_32_bits(rc_bar2_offset);
	u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
			  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
	writel(upper_32_bits(rc_bar2_offset),
	       base + PCIE_MISC_RC_BAR2_CONFIG_HI);

	/*
	 * Tell the controller how large each memory controller's region
	 * is; the field encodes log2(size) - 15 (i.e. size in 32KB units).
	 */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	for (memc = 0; memc < pcie->num_memc; memc++) {
		u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;

		if (memc == 0)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
		else if (memc == 1)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
		else if (memc == 2)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
	}
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/*
	 * We ideally want the MSI target address to be located in the 32bit
	 * addressable memory area. Some devices might depend on it. This is
	 * possible either when the inbound window is located above the lower
	 * 4GB or when the inbound area is smaller than 4GB (taking into
	 * account the rounding-up we're forced to perform).
	 */
	if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
	else
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;

	/* disable the PCIe->GISB memory window (RC_BAR1) */
	tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);

	/* disable the PCIe->SCB memory window (RC_BAR3) */
	tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);

	/* Cap the link speed if "max-link-speed" was given in the DT */
	if (pcie->gen)
		brcm_pcie_set_gen(pcie, pcie->gen);

	/* Unassert the fundamental reset */
	pcie->perst_set(pcie, 0);

	/*
	 * Give the RC/EP time to wake up, before trying to configure RC.
	 * Intermittently check status for link-up, up to a total of 100ms.
	 */
	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
		msleep(5);

	if (!brcm_pcie_link_up(pcie)) {
		dev_err(dev, "link down\n");
		return -ENODEV;
	}

	if (!brcm_pcie_rc_mode(pcie)) {
		dev_err(dev, "PCIe misconfigured; is in EP mode\n");
		return -EINVAL;
	}

	/* Map each bridge MEM window to an outbound (CPU->PCIe) window */
	resource_list_for_each_entry(entry, &bridge->windows) {
		res = entry->res;

		if (resource_type(res) != IORESOURCE_MEM)
			continue;

		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
			dev_err(pcie->dev, "too many outbound wins\n");
			return -EINVAL;
		}

		/* entry->offset is the CPU-to-PCIe address translation */
		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
					   res->start - entry->offset,
					   resource_size(res));
		num_out_wins++;
	}

	/* Don't advertise L0s capability if 'aspm-no-l0s' */
	aspm_support = PCIE_LINK_STATE_L1;
	if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
		aspm_support |= PCIE_LINK_STATE_L0S;
	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
	u32p_replace_bits(&tmp, aspm_support,
		PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	/*
	 * For config space accesses on the RC, show the right class for
	 * a PCIe-PCIe bridge (the default setting is to be EP mode).
	 */
	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
	u32p_replace_bits(&tmp, 0x060400,
			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);

	/* Optionally enable spread-spectrum clocking; failure is non-fatal */
	if (pcie->ssc) {
		ret = brcm_pcie_set_ssc(pcie);
		if (ret == 0)
			ssc_good = true;
		else
			dev_err(dev, "failed attempt to enter ssc mode\n");
	}

	/* Report the negotiated link speed and width */
	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	dev_info(dev, "link up, %s x%u %s\n",
		 pci_speed_string(pcie_link_speed[cls]), nlw,
		 ssc_good ? "(SSC)" : "(!SSC)");

	/* PCIe->SCB endian mode for BAR */
	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);

	/*
	 * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
	 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
	 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* L23 is a low-power PCIe link state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	void __iomem *base = pcie->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	int l23, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	/* Assert request for L23 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	writel(tmp, base + PCIE_MISC_PCIE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	/* Wait up to 36 msec for L23 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	for (i = 0; i < 15 && !l23; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		usleep_range(2000, 2400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (!l23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		dev_err(pcie->dev, "failed to enter low-power link state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
/*
 * brcm_phy_cntl() - Sequence the PHY's PWRDN / RESET / DIG_RESET control
 * fields on or off.
 *
 * @start: non-zero to start (power up) the PHY, zero to stop it.
 *
 * On start the three fields are set one at a time in declaration order;
 * on stop they are cleared one at a time in the reverse order, with a
 * short settle delay after each write. Afterwards the combined field
 * state is read back and verified.
 *
 * Return: 0 on success, -EIO if the read-back does not match.
 */
static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
{
	static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
	static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
	/* Iterate forward for start, backward for stop */
	const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
	const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
	u32 tmp, combined_mask = 0;
	u32 val;
	void __iomem *base = pcie->base;
	int i, ret;

	for (i = beg; i != end; start ? i++ : i--) {
		/* Set the field's bit when starting, clear it when stopping */
		val = start ? BIT_MASK(shifts[i]) : 0;
		tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		tmp = (tmp & ~masks[i]) | (val & masks[i]);
		writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		usleep_range(50, 200);
		combined_mask |= masks[i];
	}

	/* Verify all fields ended up in the requested state */
	tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
	val = start ? combined_mask : 0;

	ret = (tmp & combined_mask) == val ? 0 : -EIO;
	if (ret)
		dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static inline int brcm_phy_start(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static inline int brcm_phy_stop(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	void __iomem *base = pcie->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	if (brcm_pcie_link_up(pcie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		brcm_pcie_enter_l23(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	/* Assert fundamental reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	pcie->perst_set(pcie, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	/* Deassert request for L23 in case it was asserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	writel(tmp, base + PCIE_MISC_PCIE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	/* Turn off SerDes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	/* Shutdown PCIe bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	pcie->bridge_sw_init_set(pcie, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static int brcm_pcie_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	struct brcm_pcie *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	brcm_pcie_turn_off(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	ret = brcm_phy_stop(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	clk_disable_unprepare(pcie->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static int brcm_pcie_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	struct brcm_pcie *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	base = pcie->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	clk_prepare_enable(pcie->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	ret = brcm_phy_start(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	/* Take bridge out of reset so we can access the SERDES reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	pcie->bridge_sw_init_set(pcie, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	/* SERDES_IDDQ = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	/* wait for serdes to be stable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	ret = brcm_pcie_setup(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (pcie->msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		brcm_msi_set_regs(pcie->msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	clk_disable_unprepare(pcie->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
/*
 * Common teardown shared by remove and probe-error paths: tear down the
 * MSI domain, power down the link/SerDes/bridge, stop the PHY, then
 * release the "rescal" reset line and the bus clock.
 * NOTE(review): the ordering appears deliberate (MSI before controller
 * power-down, clock last) — preserve it.
 */
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
	brcm_msi_remove(pcie);
	brcm_pcie_turn_off(pcie);
	brcm_phy_stop(pcie);
	reset_control_assert(pcie->rescal);
	clk_disable_unprepare(pcie->clk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
/*
 * Platform driver .remove callback: detach the PCI root bus before
 * powering down and releasing the controller's resources.
 */
static int brcm_pcie_remove(struct platform_device *pdev)
{
	struct brcm_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* Stop and remove the root bus while the hardware is still alive */
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	__brcm_pcie_remove(pcie);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
/* DT compatible strings mapped to their per-SoC config data */
static const struct of_device_id brcm_pcie_match[] = {
	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
	{},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static int brcm_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct device_node *np = pdev->dev.of_node, *msi_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct pci_host_bridge *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	const struct pcie_cfg_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	struct brcm_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	data = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		pr_err("failed to look up compatible string\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	pcie = pci_host_bridge_priv(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	pcie->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	pcie->np = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	pcie->reg_offsets = data->offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	pcie->type = data->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	pcie->perst_set = data->perst_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	pcie->bridge_sw_init_set = data->bridge_sw_init_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	pcie->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (IS_ERR(pcie->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		return PTR_ERR(pcie->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (IS_ERR(pcie->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		return PTR_ERR(pcie->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	ret = of_pci_get_max_link_speed(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	pcie->gen = (ret < 0) ? 0 : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	ret = clk_prepare_enable(pcie->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		dev_err(&pdev->dev, "could not enable clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if (IS_ERR(pcie->rescal)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		clk_disable_unprepare(pcie->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		return PTR_ERR(pcie->rescal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	ret = reset_control_deassert(pcie->rescal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	ret = brcm_phy_start(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		reset_control_assert(pcie->rescal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		clk_disable_unprepare(pcie->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	ret = brcm_pcie_setup(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	if (pci_msi_enabled() && msi_np == pcie->np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		ret = brcm_pcie_enable_msi(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 			dev_err(pcie->dev, "probe of internal MSI failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	bridge->ops = &brcm_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	bridge->sysdata = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	platform_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	return pci_host_probe(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	__brcm_pcie_remove(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) MODULE_DEVICE_TABLE(of, brcm_pcie_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static const struct dev_pm_ops brcm_pcie_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	.suspend = brcm_pcie_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	.resume = brcm_pcie_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static struct platform_driver brcm_pcie_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	.probe = brcm_pcie_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	.remove = brcm_pcie_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		.name = "brcm-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		.of_match_table = brcm_pcie_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		.pm = &brcm_pcie_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) module_platform_driver(brcm_pcie_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) MODULE_AUTHOR("Broadcom");