// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra194 SoC
 *
 * Copyright (C) 2019 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
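/*
 * Sketch (not verbatim from this file) of how a field defined by a
 * mask-plus-shift pair such as HW_HOT_RST_MODE is programmed: the mask
 * is defined relative to bit 0 and shifted into place by the caller,
 * e.g.
 *
 *	val = appl_readl(pcie, APPL_CTRL);
 *	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
 *		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
 *	val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
 *		APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
 *	appl_writel(pcie, val, APPL_CTRL);
 */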

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT			16

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5
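/*
 * Sketch of how the LTSSM state is read back, assuming APPL_DEBUG
 * reflects the live LTSSM state:
 *
 *	ltssm = (appl_readl(pcie, APPL_DEBUG) &
 *		 APPL_DEBUG_LTSSM_STATE_MASK) >> APPL_DEBUG_LTSSM_STATE_SHIFT;
 *
 * The result can then be compared against values such as
 * LTSSM_STATE_PRE_DETECT.
 */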

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE			BIT(0)
#define IO_BASE_IO_DECODE_BIT8			BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE	BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF		0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT		(19)

#define EVENT_COUNTER_ALL_CLEAR			0x3
#define EVENT_COUNTER_ENABLE_ALL		0x7
#define EVENT_COUNTER_ENABLE_SHIFT		2
#define EVENT_COUNTER_EVENT_SEL_MASK		GENMASK(7, 0)
#define EVENT_COUNTER_EVENT_SEL_SHIFT		16
#define EVENT_COUNTER_EVENT_Tx_L0S		0x2
#define EVENT_COUNTER_EVENT_Rx_L0S		0x3
#define EVENT_COUNTER_EVENT_L1			0x5
#define EVENT_COUNTER_EVENT_L1_1		0x7
#define EVENT_COUNTER_EVENT_L1_2		0x8
#define EVENT_COUNTER_GROUP_SEL_SHIFT		24
#define EVENT_COUNTER_GROUP_5			0x5

#define N_FTS_VAL				52
#define FTS_VAL					52

#define PORT_LOGIC_MSI_CTRL_INT_0_EN		0x828

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define GEN3_RELATED_OFF			0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2
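/*
 * The CRS_* values select how a config read that completed with CRS
 * (Configuration Request Retry Status) is reported on the AMBA side:
 * plain OKAY, OKAY with 0xFFFFFFFF read data, or OKAY with 0xFFFF0001
 * read data. This summary is inferred from the macro names rather than
 * taken from hardware documentation.
 */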

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL		0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT				10000

#define LTSSM_TIMEOUT				50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT		5

#define GEN1_CORE_CLK_FREQ			62500000
#define GEN2_CORE_CLK_FREQ			125000000
#define GEN3_CORE_CLK_FREQ			250000000
#define GEN4_CORE_CLK_FREQ			500000000

#define LTR_MSG_TIMEOUT				(100 * 1000)

#define PERST_DEBOUNCE_TIME			(5 * 1000)
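/*
 * The timeouts above are in microseconds: LTR_MSG_TIMEOUT is 100 ms
 * (consumed via ktime_add_us() in tegra_pcie_ep_irq_thread() below);
 * PERST_DEBOUNCE_TIME being 5 ms in the same unit is an assumption
 * based on the shared convention.
 */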

#define EP_STATE_DISABLED			0
#define EP_STATE_ENABLED			1

static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};
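/*
 * pcie_gen_freq[] is indexed by (Current Link Speed - 1), where the
 * speed comes from the PCI_EXP_LNKSTA_CLS field (1 = Gen-1, 2 = Gen-2,
 * ...); see the clk_set_rate() call in tegra_pcie_ep_irq_thread()
 * below.
 */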

static const u32 event_cntr_ctrl_offset[] = {
	0x1d8,
	0x1a8,
	0x1a8,
	0x1a8,
	0x1c4,
	0x1d8
};

static const u32 event_cntr_data_offset[] = {
	0x1dc,
	0x1ac,
	0x1ac,
	0x1ac,
	0x1c8,
	0x1dc
};
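/*
 * Both tables are indexed by the controller ID (pcie->cid); each of
 * the six controllers exposes its event-counter control/data registers
 * at a slightly different DBI offset. See event_counter_prog() below
 * for the lookup.
 */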

struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	enum dw_pcie_device_mode mode;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool link_state;
	bool update_fc_fixup;
	u8 init_link_width;
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u32 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
};

struct tegra_pcie_dw_of_data {
	enum dw_pcie_device_mode mode;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}
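/*
 * Typical read-modify-write usage of the helpers above (a sketch that
 * mirrors pex_ep_event_hot_rst_done() below). Note that appl_writel()
 * takes (pcie, value, reg), i.e. the value comes before the register
 * offset:
 *
 *	val = appl_readl(pcie, APPL_CTRL);
 *	val |= APPL_CTRL_LTSSM_EN;
 *	appl_writel(pcie, val, APPL_CTRL);
 */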

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};

static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE: Since this scenario is uncommon and the link as such is
	 * not stable anyway, don't bother waiting to confirm whether the
	 * link is really transitioning to Gen-2 speed.
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, tmp;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);

			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (val & APPL_INTR_STATUS_L0_INT_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
	}

	return IRQ_HANDLED;
}

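/*
 * Called from the hard IRQ handler below once the host has completed a
 * hot reset: clear every latched interrupt status register, then
 * re-enable the LTSSM so that link training can restart.
 */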
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed;

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Spurious interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	/*
	 * PORT_LOGIC_MSIX_DOORBELL is an endpoint-mode-specific register
	 * that happens to appear even when the controller is operating
	 * in Root Port mode, and the system hangs when it is accessed
	 * with the link in the ASPM L1 state. So skip accessing it
	 * altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	/*
	 * PORT_LOGIC_MSIX_DOORBELL is an endpoint-mode-specific register
	 * that happens to appear even when the controller is operating
	 * in Root Port mode, and the system hangs when it is accessed
	 * with the link in the ASPM L1 state. So skip accessing it
	 * altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);

	return val;
}
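/*
 * Example (sketch) of reading one counter through the helper above, as
 * done in aspm_state_cnt() below:
 *
 *	u32 count = event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S);
 *
 * Programming the event select also sets EVENT_COUNTER_ENABLE_ALL, so
 * each call returns the count accumulated since the counters were last
 * cleared.
 */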

static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);

	return 0;
}

static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
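	/*
	 * Per the PCIe L1 PM Substates capability layout, the
	 * Common_Mode_Restore_Time field occupies bits [15:8] and the
	 * T_POWER_ON value bits [23:19], hence the hard-coded shifts
	 * below.
	 */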
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) val |= (pcie->aspm_cmrt << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) val |= (pcie->aspm_pwr_on_t << 19);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /* Program L0s and L1 entrance latencies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) val |= PORT_AFR_ENTER_ASPM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) static void init_debugfs(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) aspm_state_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) u16 val_w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) val = appl_readl(pcie, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) appl_writel(pcie, val, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (pcie->enable_cdm_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) val = appl_readl(pcie, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) appl_writel(pcie, val, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) val = appl_readl(pcie, APPL_INTR_EN_L1_18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) appl_writel(pcie, val, APPL_INTR_EN_L1_18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) PCI_EXP_LNKSTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) PCI_EXP_LNKSTA_NLW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) PCI_EXP_LNKCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) val_w |= PCI_EXP_LNKCTL_LBMIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) val_w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* Enable legacy interrupt generation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) val = appl_readl(pcie, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) val |= APPL_INTR_EN_L0_0_INT_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) appl_writel(pcie, val, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) val |= APPL_INTR_EN_L1_8_INTX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (IS_ENABLED(CONFIG_PCIEAER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) val |= APPL_INTR_EN_L1_8_AER_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) dw_pcie_msi_init(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* Enable MSI interrupt generation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) val = appl_readl(pcie, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) appl_writel(pcie, val, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
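^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * Illustrative only (not part of the original driver): the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * enable paths above all share the same read-modify-write shape on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * the APPL registers. A minimal set-bits helper, as a sketch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) static inline void appl_setbits(struct tegra_pcie_dw *pcie, u32 set, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) appl_writel(pcie, appl_readl(pcie, reg) | set, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * With it, the MSI enable above would read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * appl_setbits(pcie, APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * APPL_INTR_EN_L0_0_MSI_RCV_INT_EN, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) */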
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Clear interrupt statuses before enabling interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) tegra_pcie_enable_system_interrupts(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) tegra_pcie_enable_legacy_interrupts(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (IS_ENABLED(CONFIG_PCI_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) tegra_pcie_enable_msi_interrupts(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct dw_pcie *pci = &pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) u32 val, offset, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* Program init preset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) for (i = 0; i < pcie->num_lanes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) val |= GEN3_GEN4_EQ_PRESET_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) val |= (GEN3_GEN4_EQ_PRESET_INIT <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) offset = dw_pcie_find_ext_capability(pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) PCI_EXT_CAP_ID_PL_16GT) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) PCI_PL_16GT_LE_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) val = dw_pcie_readb_dbi(pci, offset + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) val |= GEN3_GEN4_EQ_PRESET_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) val |= (GEN3_GEN4_EQ_PRESET_INIT <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dw_pcie_writeb_dbi(pci, offset + i, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
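^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * Illustrative only (not part of the original driver): the three
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * GEN3_RELATED_OFF/GEN3_EQ_CONTROL_OFF sequences above share one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * pattern: select a data rate via RATE_SHADOW_SEL, then program the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * preset request vector for that rate. A minimal sketch of that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * pattern as a helper, assuming no extra ordering constraints
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * between the DBI writes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static void __maybe_unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) config_eq_pset_req_vec(struct dw_pcie *pci, u32 rate_sel, u32 pset_vec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /* Select the data rate whose shadow registers are programmed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) val |= (rate_sel << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Program the preset request vector for the selected rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) val |= (pset_vec << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Usage equivalent to the sequences above (rate_sel 0/1 presumably
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * select the 8.0 GT/s and 16.0 GT/s shadow registers):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * config_eq_pset_req_vec(pci, 0, 0x3ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * config_eq_pset_req_vec(pci, 1, 0x360);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */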
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static void tegra_pcie_prepare_host(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* Return 0xFFFF0001 (CRS SV value) to config reads completed with CRS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) AMBA_ERROR_RESPONSE_CRS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* Configure Max lane width from DT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) val &= ~PCI_EXP_LNKCAP_MLW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) config_gen3_gen4_eq_presets(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) init_host_aspm(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (pcie->update_fc_fixup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dw_pcie_setup_rc(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* Assert RST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) val = appl_readl(pcie, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) val &= ~APPL_PINMUX_PEX_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) appl_writel(pcie, val, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) usleep_range(100, 200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* Enable LTSSM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) val |= APPL_CTRL_LTSSM_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* De-assert RST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) val = appl_readl(pcie, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) val |= APPL_PINMUX_PEX_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) appl_writel(pcie, val, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static int tegra_pcie_dw_host_init(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) u32 val, tmp, offset, speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) pp->bridge->ops = &tegra_pci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) tegra_pcie_prepare_host(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (dw_pcie_wait_for_link(pci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * Some endpoints cannot bring the link up if the root port has the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * Data Link Feature (DLF) enabled. Refer to PCIe Base Spec rev 4.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * ver 1.0, sec 3.4.2 & 7.7.4 for more information on Scaled Flow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * Control and DLF. So, confirm that this is indeed the case here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * and, if so, attempt link-up once again with DLF disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) val = appl_readl(pcie, APPL_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) val &= APPL_DEBUG_LTSSM_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) tmp = appl_readl(pcie, APPL_LINK_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (!(val == 0x11 && !tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Link is down for reasons unrelated to DLF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dev_info(pci->dev, "Link is down in DLL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev_info(pci->dev, "Trying again with DLFE disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* Disable LTSSM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) val &= ~APPL_CTRL_LTSSM_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) reset_control_assert(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) reset_control_deassert(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) val &= ~PCI_DLF_EXCHANGE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) tegra_pcie_prepare_host(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (dw_pcie_wait_for_link(pci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) PCI_EXP_LNKSTA_CLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /* Guard against an out-of-bounds pcie_gen_freq[] access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (speed >= 1 && speed <= ARRAY_SIZE(pcie_gen_freq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) tegra_pcie_enable_interrupts(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
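^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * Illustrative only (not part of the original driver): the DLF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * disable step in the retry path above, factored out as a sketch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * As in the code above, it assumes the DLF extended capability is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * always present in this IP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static void __maybe_unused tegra_pcie_disable_dlf(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) u32 offset, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) val &= ~PCI_DLF_EXCHANGE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }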
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return !!(val & PCI_EXP_LNKSTA_DLLLA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) pp->num_vectors = MAX_MSI_IRQS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) enable_irq(pcie->pex_rst_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) disable_irq(pcie->pex_rst_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static const struct dw_pcie_ops tegra_dw_pcie_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) .link_up = tegra_pcie_dw_link_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) .start_link = tegra_pcie_dw_start_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) .stop_link = tegra_pcie_dw_stop_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) .host_init = tegra_pcie_dw_host_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) .set_num_vectors = tegra_pcie_set_msi_vec_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) unsigned int phy_count = pcie->phy_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) while (phy_count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) phy_power_off(pcie->phys[phy_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) phy_exit(pcie->phys[phy_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) for (i = 0; i < pcie->phy_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ret = phy_init(pcie->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) goto phy_power_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ret = phy_power_on(pcie->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) goto phy_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * Unwind in reverse order. Entering the loop at phy_exit skips the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * phy_power_off() for the PHY whose phy_power_on() failed, since it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * was never powered on; entering at phy_power_off skips the failing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * PHY entirely, since its phy_init() did not succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) phy_power_off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) phy_power_off(pcie->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) phy_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) phy_exit(pcie->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
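^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * Illustrative only (not part of the original driver): the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * goto-into-loop unwind above, rewritten as a plain helper in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * that flow is easier to follow. 'failed' is the index of the PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * whose call failed; 'did_init' is true when its phy_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * succeeded and only phy_power_on() failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static void __maybe_unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) tegra_pcie_unwind_phys(struct tegra_pcie_dw *pcie, unsigned int failed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) bool did_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* The failing PHY needs phy_exit() only if it was initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (did_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) phy_exit(pcie->phys[failed]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /* Fully tear down every PHY that came up before it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) while (failed--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) phy_power_off(pcie->phys[failed]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) phy_exit(pcie->phys[failed]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }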
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct device_node *np = pcie->dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dev_err(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) &pcie->aspm_pwr_on_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) &pcie->aspm_l0s_enter_lat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dev_info(pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) "Failed to read ASPM L0s Entrance latency: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ret = of_property_count_strings(np, "phy-names");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) dev_err(pcie->dev, "Failed to find PHY entries: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) pcie->phy_count = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) pcie->update_fc_fixup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) pcie->supports_clkreq = of_property_read_bool(np, "supports-clkreq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) pcie->enable_cdm_check =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) of_property_read_bool(np, "snps,enable-cdm-check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (pcie->mode == DW_PCIE_RC_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Endpoint mode specific DT entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (IS_ERR(pcie->pex_rst_gpiod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int err = PTR_ERR(pcie->pex_rst_gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) const char *level = KERN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (err == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) level = KERN_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dev_printk(level, pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) dev_fmt("Failed to get PERST GPIO: %d\n"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) "nvidia,refclk-select",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) GPIOD_OUT_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) const char *level = KERN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (err == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) level = KERN_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dev_printk(level, pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) pcie->pex_refclk_sel_gpiod = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
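^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * For reference, a sketch of the DT properties consumed above (node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * name, unit address and values are illustrative, not taken from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * real board):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * pcie@14180000 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * num-lanes = <8>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * nvidia,bpmp = <&bpmp 0>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * nvidia,aspm-cmrt-us = <60>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * nvidia,aspm-pwr-on-t-us = <20>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * nvidia,aspm-l0s-entrance-latency-us = <3>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * supports-clkreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * Endpoint mode additionally uses reset-gpios and, optionally,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * nvidia,refclk-select-gpios, as parsed above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) */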
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct mrq_uphy_response resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct tegra_bpmp_message msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct mrq_uphy_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* Controller-5 doesn't need to have its state set by BPMP-FW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (pcie->cid == 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) memset(&req, 0, sizeof(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) memset(&resp, 0, sizeof(resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) req.controller_state.pcie_controller = pcie->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) req.controller_state.enable = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) memset(&msg, 0, sizeof(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) msg.mrq = MRQ_UPHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) msg.tx.data = &req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) msg.tx.size = sizeof(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) msg.rx.data = &resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) msg.rx.size = sizeof(resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return tegra_bpmp_transfer(pcie->bpmp, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct mrq_uphy_response resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct tegra_bpmp_message msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct mrq_uphy_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) memset(&req, 0, sizeof(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) memset(&resp, 0, sizeof(resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) memset(&msg, 0, sizeof(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) msg.mrq = MRQ_UPHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) msg.tx.data = &req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) msg.tx.size = sizeof(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) msg.rx.data = &resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) msg.rx.size = sizeof(resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return tegra_bpmp_transfer(pcie->bpmp, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
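^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * Illustrative only (not part of the original driver): both MRQ_UPHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * paths above marshal the tegra_bpmp_message identically. A minimal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * shared wrapper, as a sketch, assuming no caller needs to inspect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * the response payload:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int __maybe_unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) tegra_pcie_bpmp_send_uphy_req(struct tegra_pcie_dw *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) struct mrq_uphy_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct mrq_uphy_response resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct tegra_bpmp_message msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) memset(&resp, 0, sizeof(resp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) memset(&msg, 0, sizeof(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) msg.mrq = MRQ_UPHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) msg.tx.data = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) msg.tx.size = sizeof(*req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) msg.rx.data = &resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) msg.rx.size = sizeof(resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return tegra_bpmp_transfer(pcie->bpmp, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }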
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct pcie_port *pp = &pcie->pci.pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct pci_bus *child, *root_bus = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * With some endpoints, the link does not go into the L2 state on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * Tegra unless they are in the D0 state. So, make sure that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * immediate downstream devices are in D0 before sending PME_TurnOff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * to put the link into L2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * sec 5.2 "Link State Power Management" (page 428).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) list_for_each_entry(child, &pp->bridge->bus->children, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* Find the bus immediately downstream of the root port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (child->parent == pp->bridge->bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) root_bus = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (!root_bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) dev_err(pcie->dev, "Failed to find downstream devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) list_for_each_entry(pdev, &root_bus->devices, bus_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (PCI_SLOT(pdev->devfn) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (pci_set_power_state(pdev, PCI_D0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) dev_err(pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) "Failed to transition %s to D0 state\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (IS_ERR(pcie->slot_ctl_3v3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return PTR_ERR(pcie->slot_ctl_3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) pcie->slot_ctl_3v3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (IS_ERR(pcie->slot_ctl_12v)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return PTR_ERR(pcie->slot_ctl_12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) pcie->slot_ctl_12v = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (pcie->slot_ctl_3v3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) ret = regulator_enable(pcie->slot_ctl_3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) dev_err(pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) "Failed to enable 3.3V slot supply: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (pcie->slot_ctl_12v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ret = regulator_enable(pcie->slot_ctl_12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) dev_err(pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) "Failed to enable 12V slot supply: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) goto fail_12v_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * According to PCI Express Card Electromechanical Specification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * should be a minimum of 100ms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) fail_12v_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (pcie->slot_ctl_3v3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) regulator_disable(pcie->slot_ctl_3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
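^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * Illustrative only (not part of the original driver): a named
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * constant for the CEM r1.1 Table 2-4 T_PVPERL value used above. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * adopted, it would sit with the other macros at the top of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * and the msleep(100) above would become msleep(T_PVPERL_MS):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) #define T_PVPERL_MS 100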
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (pcie->slot_ctl_12v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) regulator_disable(pcie->slot_ctl_12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (pcie->slot_ctl_3v3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) regulator_disable(pcie->slot_ctl_3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) bool en_hw_hot_rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) dev_err(pcie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) "Failed to enable controller %u: %d\n", pcie->cid, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ret = tegra_pcie_enable_slot_regulators(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto fail_slot_reg_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ret = regulator_enable(pcie->pex_ctl_supply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) goto fail_reg_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ret = clk_prepare_enable(pcie->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) goto fail_core_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ret = reset_control_deassert(pcie->core_apb_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) goto fail_core_apb_rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (en_hw_hot_rst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* Enable HW_HOT_RST mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) val |= APPL_CTRL_HW_HOT_RST_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) ret = tegra_pcie_enable_phy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) goto fail_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /* Update CFG base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) APPL_CFG_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /* Configure this core for RP mode operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) val = appl_readl(pcie, APPL_CFG_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) appl_writel(pcie, val, APPL_CFG_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!pcie->supports_clkreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) val = appl_readl(pcie, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) appl_writel(pcie, val, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /* Update iATU_DMA base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) appl_writel(pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) APPL_CFG_IATU_DMA_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) reset_control_deassert(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) PCI_CAP_ID_EXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (!pcie->supports_clkreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) disable_aspm_l11(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) disable_aspm_l12(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) fail_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) reset_control_assert(pcie->core_apb_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) fail_core_apb_rst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) clk_disable_unprepare(pcie->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) fail_core_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) regulator_disable(pcie->pex_ctl_supply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) fail_reg_en:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) tegra_pcie_disable_slot_regulators(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) fail_slot_reg_en:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) tegra_pcie_bpmp_set_ctrl_state(pcie, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static int __deinit_controller(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) ret = reset_control_assert(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) tegra_pcie_disable_phy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ret = reset_control_assert(pcie->core_apb_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) clk_disable_unprepare(pcie->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ret = regulator_disable(pcie->pex_ctl_supply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) tegra_pcie_disable_slot_regulators(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) pcie->cid, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct dw_pcie *pci = &pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct pcie_port *pp = &pci->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ret = tegra_pcie_config_controller(pcie, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) pp->ops = &tegra_pcie_dw_host_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) ret = dw_pcie_host_init(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) goto fail_host_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) fail_host_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return __deinit_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (!tegra_pcie_dw_link_up(&pcie->pci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) val = appl_readl(pcie, APPL_RADM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) val |= APPL_PM_XMT_TURNOFF_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) appl_writel(pcie, val, APPL_RADM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 1, PME_ACK_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (!tegra_pcie_dw_link_up(&pcie->pci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) dev_dbg(pcie->dev, "PCIe link is not up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (tegra_pcie_try_link_l2(pcie)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) dev_info(pcie->dev, "Link didn't transition to L2 state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * The TX lane clock frequency falls back to Gen1 only if the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * is in the L2 or detect state. So, assert PERST# to the endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * to force the root port into the detect state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) data = appl_readl(pcie, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) data &= ~APPL_PINMUX_PEX_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) appl_writel(pcie, data, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) ((data &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) APPL_DEBUG_LTSSM_STATE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) APPL_DEBUG_LTSSM_STATE_SHIFT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) LTSSM_STATE_PRE_DETECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 1, LTSSM_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) dev_info(pcie->dev, "Link didn't go to detect state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) /* Disable LTSSM after link is in detect state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) data = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) data &= ~APPL_CTRL_LTSSM_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) appl_writel(pcie, data, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * DBI registers may not be accessible after this point, as PLL-E
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * may be powered down depending on how the endpoint drives CLKREQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) data = appl_readl(pcie, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /* Cut REFCLK to slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) appl_writel(pcie, data, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) {
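^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * Wake downstream devices back to D0 while their config space is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * reachable, unregister the host bridge, and move the link to L2 via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * PME_Turn_Off before the controller itself is torn down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  */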
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) tegra_pcie_downstream_dev_to_D0(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) dw_pcie_host_deinit(&pcie->pci.pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) tegra_pcie_dw_pme_turnoff(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return __deinit_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct pcie_port *pp = &pcie->pci.pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (IS_ENABLED(CONFIG_PCI_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (pp->msi_irq <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) dev_err(dev, "Failed to get MSI interrupt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return pp->msi_irq ?: -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) dev_err(dev, "Failed to resume runtime PM: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) goto fail_pm_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ret = pinctrl_pm_select_default_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) goto fail_pm_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) tegra_pcie_init_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (!pcie->link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ret = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) goto fail_host_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) goto fail_host_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) pcie->debugfs = debugfs_create_dir(name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) init_debugfs(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) fail_host_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) tegra_pcie_deinit_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) fail_pm_get_sync:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (pcie->ep_state == EP_STATE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /* Disable LTSSM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) val &= ~APPL_CTRL_LTSSM_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
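^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* Wait for the LTSSM to settle in the pre-detect state */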
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) APPL_DEBUG_LTSSM_STATE_SHIFT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) LTSSM_STATE_PRE_DETECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 1, LTSSM_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) dev_err(pcie->dev, "Failed to transition to detect state: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) reset_control_assert(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) tegra_pcie_disable_phy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) reset_control_assert(pcie->core_apb_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) clk_disable_unprepare(pcie->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) pm_runtime_put_sync(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) pcie->ep_state = EP_STATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) dev_dbg(pcie->dev, "Endpoint deinitialization completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) struct dw_pcie *pci = &pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct dw_pcie_ep *ep = &pci->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (pcie->ep_state == EP_STATE_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
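^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  * PERST# has been released: bring up the UPHY PLL, clocks, resets and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  * PHY, program the application logic for EP mode, then let the DWC core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  * complete endpoint initialization before enabling the LTSSM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  */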
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) ret = pm_runtime_resume_and_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) dev_err(dev, "Failed to resume runtime PM: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) goto fail_pll_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) ret = clk_prepare_enable(pcie->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) dev_err(dev, "Failed to enable core clock: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) goto fail_core_clk_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) ret = reset_control_deassert(pcie->core_apb_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) goto fail_core_apb_rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) ret = tegra_pcie_enable_phy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) dev_err(dev, "Failed to enable PHY: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) goto fail_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /* Clear any stale interrupt statuses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* Configure this core for EP mode operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) val = appl_readl(pcie, APPL_DM_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) val &= ~APPL_DM_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) val |= APPL_DM_TYPE_EP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) appl_writel(pcie, val, APPL_DM_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) val |= APPL_CTRL_SYS_PRE_DET_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) val |= APPL_CTRL_HW_HOT_RST_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) val = appl_readl(pcie, APPL_CFG_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) val |= APPL_CFG_MISC_SLV_EP_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) appl_writel(pcie, val, APPL_CFG_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) val = appl_readl(pcie, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) appl_writel(pcie, val, APPL_PINMUX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) APPL_CFG_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) appl_writel(pcie, pcie->atu_dma_res->start &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) APPL_CFG_IATU_DMA_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) val = appl_readl(pcie, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) appl_writel(pcie, val, APPL_INTR_EN_L0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) reset_control_deassert(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
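^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  * Apply the ACK/NAK timer fix-up when the platform requests it via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  * "nvidia,update-fc-fixup" device tree property.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  */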
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (pcie->update_fc_fixup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) config_gen3_gen4_eq_presets(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) init_host_aspm(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (!pcie->supports_clkreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) disable_aspm_l11(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) disable_aspm_l12(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
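^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /* Declare the receiver ZRX-DC compliant by clearing the non-compliance flag */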
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) PCI_CAP_ID_EXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
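^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  * Point the MSI-X address-match window at the reserved MSI memory so a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  * local write to ep->msi_mem (see tegra_pcie_ep_raise_msix_irq()) is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  * turned into an MSI-X message by the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  */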
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ret = dw_pcie_ep_init_complete(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) dev_err(dev, "Failed to complete initialization: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) goto fail_init_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) dw_pcie_ep_init_notify(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /* Enable LTSSM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) val |= APPL_CTRL_LTSSM_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) pcie->ep_state = EP_STATE_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) dev_dbg(dev, "Endpoint initialization completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) fail_init_complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) reset_control_assert(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) tegra_pcie_disable_phy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) fail_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) reset_control_assert(pcie->core_apb_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) fail_core_apb_rst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) clk_disable_unprepare(pcie->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) fail_core_clk_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) tegra_pcie_bpmp_set_pll_state(pcie, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) fail_pll_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct tegra_pcie_dw *pcie = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
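^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) /* The PERST# level tells us whether reset was asserted or released */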
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (gpiod_get_value(pcie->pex_rst_gpiod))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) pex_ep_event_pex_rst_assert(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) pex_ep_event_pex_rst_deassert(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* Tegra194 supports only INTA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (irq > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
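^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /* Generate an INTx pulse: assert, hold for 1-2 ms, then deassert */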
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) appl_writel(pcie, 1, APPL_LEGACY_INTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) appl_writel(pcie, 0, APPL_LEGACY_INTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (unlikely(irq > 31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) struct dw_pcie_ep *ep = &pcie->pci.ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) writel(irq, ep->msi_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) enum pci_epc_irq_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) u16 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) case PCI_EPC_IRQ_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) case PCI_EPC_IRQ_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) case PCI_EPC_IRQ_MSIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) dev_err(pci->dev, "Unknown IRQ type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
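^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  * BAR 0 is the only usable BAR: it is fixed at 1 MiB and 64-bit (so its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  * upper half occupies BAR 1), while BARs 2-5 are reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  */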
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) static const struct pci_epc_features tegra_pcie_epc_features = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) .linkup_notifier = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) .core_init_notifier = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) .msi_capable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) .msix_capable = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) .bar_fixed_64bit = 1 << BAR_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) .bar_fixed_size[0] = SZ_1M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static const struct pci_epc_features*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return &tegra_pcie_epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) static struct dw_pcie_ep_ops pcie_ep_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) .raise_irq = tegra_pcie_ep_raise_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) .get_features = tegra_pcie_ep_get_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct dw_pcie *pci = &pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct dw_pcie_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) ep = &pci->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) ep->ops = &pcie_ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ep->phys_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) ep->addr_size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) ep->page_size = SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) ret = gpiod_to_irq(pcie->pex_rst_gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) pcie->pex_rst_irq = (unsigned int)ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) pcie->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) dev_err(dev, "Failed to create PERST IRQ string\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
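^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  * Keep the PERST# IRQ disabled for now; it is enabled once the endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  * is ready to start the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  */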
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) pcie->ep_state = EP_STATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) tegra_pcie_ep_pex_rst_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) IRQF_TRIGGER_RISING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) name, (void *)pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) ret = dw_pcie_ep_init(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) static int tegra_pcie_dw_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) const struct tegra_pcie_dw_of_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) struct resource *atu_dma_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct tegra_pcie_dw *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct resource *dbi_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) struct pcie_port *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) struct dw_pcie *pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct phy **phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) data = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (!pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) pci = &pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) pci->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) pci->ops = &tegra_dw_pcie_ops;
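^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /* Fast Training Sequence counts advertised during link training */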
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) pci->n_fts[0] = N_FTS_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) pci->n_fts[1] = FTS_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) pp = &pci->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) pcie->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) pcie->mode = (enum dw_pcie_device_mode)data->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ret = tegra_pcie_dw_parse_dt(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) const char *level = KERN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) level = KERN_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) dev_printk(level, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) dev_fmt("Failed to parse device tree: %d\n"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) ret = tegra_pcie_get_slot_regulators(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) const char *level = KERN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) level = KERN_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) dev_printk(level, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) dev_fmt("Failed to get slot regulators: %d\n"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (pcie->pex_refclk_sel_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if (IS_ERR(pcie->pex_ctl_supply)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) ret = PTR_ERR(pcie->pex_ctl_supply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) dev_err(dev, "Failed to get regulator: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) pcie->core_clk = devm_clk_get(dev, "core");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (IS_ERR(pcie->core_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) dev_err(dev, "Failed to get core clock: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) PTR_ERR(pcie->core_clk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return PTR_ERR(pcie->core_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) "appl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (!pcie->appl_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) dev_err(dev, "Failed to find \"appl\" region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (IS_ERR(pcie->appl_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return PTR_ERR(pcie->appl_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (IS_ERR(pcie->core_apb_rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) dev_err(dev, "Failed to get APB reset: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) PTR_ERR(pcie->core_apb_rst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return PTR_ERR(pcie->core_apb_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (!phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) for (i = 0; i < pcie->phy_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) name = kasprintf(GFP_KERNEL, "p2u-%u", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) dev_err(dev, "Failed to create P2U string\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) phys[i] = devm_phy_get(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (IS_ERR(phys[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) ret = PTR_ERR(phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) dev_err(dev, "Failed to get PHY: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) pcie->phys = phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (!dbi_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) dev_err(dev, "Failed to find \"dbi\" region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) pcie->dbi_res = dbi_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (IS_ERR(pci->dbi_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) return PTR_ERR(pci->dbi_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* Tegra HW locates DBI2 at a fixed offset from DBI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) pci->dbi_base2 = pci->dbi_base + 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) "atu_dma");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (!atu_dma_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) dev_err(dev, "Failed to find \"atu_dma\" region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) pcie->atu_dma_res = atu_dma_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (IS_ERR(pci->atu_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return PTR_ERR(pci->atu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) pcie->core_rst = devm_reset_control_get(dev, "core");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (IS_ERR(pcie->core_rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) dev_err(dev, "Failed to get core reset: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) PTR_ERR(pcie->core_rst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return PTR_ERR(pcie->core_rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) pp->irq = platform_get_irq_byname(pdev, "intr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (pp->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) return pp->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) pcie->bpmp = tegra_bpmp_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (IS_ERR(pcie->bpmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return PTR_ERR(pcie->bpmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) platform_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) switch (pcie->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) case DW_PCIE_RC_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) IRQF_SHARED, "tegra-pcie-intr", pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) ret = tegra_pcie_config_rp(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (ret && ret != -ENOMEDIUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) case DW_PCIE_EP_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) ret = devm_request_threaded_irq(dev, pp->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) tegra_pcie_ep_hard_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) tegra_pcie_ep_irq_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) IRQF_SHARED | IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) "tegra-pcie-ep-intr", pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) ret = tegra_pcie_config_ep(pcie, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) tegra_bpmp_put(pcie->bpmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static int tegra_pcie_dw_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (!pcie->link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) debugfs_remove_recursive(pcie->debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) tegra_pcie_deinit_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) pm_runtime_put_sync(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) pm_runtime_disable(pcie->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) tegra_bpmp_put(pcie->bpmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (pcie->pex_refclk_sel_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
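^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  * suspend_late enables hardware-controlled hot reset before the system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  * sleeps; tegra_pcie_dw_resume_early() selects the immediate reset mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  * and disables it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  */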
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) static int tegra_pcie_dw_suspend_late(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (!pcie->link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) /* Enable HW_HOT_RST mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) val |= APPL_CTRL_HW_HOT_RST_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static int tegra_pcie_dw_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (!pcie->link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /* Save the MSI vector enable state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) PORT_LOGIC_MSI_CTRL_INT_0_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) tegra_pcie_downstream_dev_to_D0(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) tegra_pcie_dw_pme_turnoff(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return __deinit_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) static int tegra_pcie_dw_resume_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (!pcie->link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) ret = tegra_pcie_config_controller(pcie, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) dev_err(dev, "Failed to init host: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) goto fail_host_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) /* Restore the MSI vector enable state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) pcie->msi_ctrl_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) fail_host_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return __deinit_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) static int tegra_pcie_dw_resume_early(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (!pcie->link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) /* Disable HW_HOT_RST mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) val = appl_readl(pcie, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) val &= ~APPL_CTRL_HW_HOT_RST_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) appl_writel(pcie, val, APPL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (!pcie->link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) debugfs_remove_recursive(pcie->debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) tegra_pcie_downstream_dev_to_D0(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
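^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) /* Quiesce controller interrupts before turning the link off */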
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) disable_irq(pcie->pci.pp.irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (IS_ENABLED(CONFIG_PCI_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) disable_irq(pcie->pci.pp.msi_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) tegra_pcie_dw_pme_turnoff(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) __deinit_controller(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) .mode = DW_PCIE_RC_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) .mode = DW_PCIE_EP_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static const struct of_device_id tegra_pcie_dw_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) .compatible = "nvidia,tegra194-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) .data = &tegra_pcie_dw_rc_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) .compatible = "nvidia,tegra194-pcie-ep",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) .data = &tegra_pcie_dw_ep_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) };
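^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  * Sketch of a root-port node matched by the table above, using only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  * resource names looked up in tegra_pcie_dw_probe(); addresses, providers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  * and the remaining required properties are elided (see the DT binding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  * for a complete example):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *	pcie@... {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *		compatible = "nvidia,tegra194-pcie";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *		reg-names = "appl", "dbi", "atu_dma", ...;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *		interrupt-names = "intr", "msi";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *		clock-names = "core";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *		reset-names = "apb", "core";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *		phy-names = "p2u-0", ...;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *		vddio-pex-ctl-supply = <&...>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  */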
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) .suspend_late = tegra_pcie_dw_suspend_late,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) .suspend_noirq = tegra_pcie_dw_suspend_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) .resume_noirq = tegra_pcie_dw_resume_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) .resume_early = tegra_pcie_dw_resume_early,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static struct platform_driver tegra_pcie_dw_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) .probe = tegra_pcie_dw_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) .remove = tegra_pcie_dw_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) .shutdown = tegra_pcie_dw_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) .name = "tegra194-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) .pm = &tegra_pcie_dw_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) .of_match_table = tegra_pcie_dw_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) module_platform_driver(tegra_pcie_dw_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) MODULE_LICENSE("GPL v2");