// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

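/* Clocks, resets and supplies used by the v2.1.0 core (APQ8064/IPQ8064) */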
#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

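/* Per-IP-version hooks invoked around DesignWare host initialization */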
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

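/* Assert PERST# via the reset GPIO to put the endpoint into reset */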
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

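/* Kick link training via the per-version hook, then wait for link up */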
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

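/* Look up the supplies, clocks and resets needed by the v2.1.0 core */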
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

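/* Reverse of init: assert all resets, disable clocks and supplies */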
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

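/* Bring the v2.1.0 core out of reset and configure the PHY and bridge */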
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

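/* Look up the regulator, clocks and core reset used by the v1.0.0 core */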
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

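	/* Set the AXI master write-address halt enable bit; needed for MSI */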
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

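/* Start link training by setting LTSSM_EN (bit 8) in the PARF LTSSM register */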
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

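/* Look up the supplies and clocks used by the v2.3.2 core (e.g. MSM8996) */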
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

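/* Power up the v2.3.2 core: supplies and clocks on, then PARF setup */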
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

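/* The pipe clock comes from the PHY, so it is enabled here in post_init,
 * after the PHY has been powered on.
 */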
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

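/* Look up v2.4.0 resources; IPQ4019 additionally controls PHY-related resets */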
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct dw_pcie *pci = pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
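^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * Assert every reset, give them time to propagate, then release them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * in order before re-enabling the bulk clocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */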
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ret = reset_control_assert(res->axi_m_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dev_err(dev, "cannot assert axi master reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ret = reset_control_assert(res->axi_s_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) dev_err(dev, "cannot assert axi slave reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) usleep_range(10000, 12000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ret = reset_control_assert(res->pipe_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) dev_err(dev, "cannot assert pipe reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ret = reset_control_assert(res->pipe_sticky_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) dev_err(dev, "cannot assert pipe sticky reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ret = reset_control_assert(res->phy_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) dev_err(dev, "cannot assert phy reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ret = reset_control_assert(res->phy_ahb_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) dev_err(dev, "cannot assert phy ahb reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) usleep_range(10000, 12000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ret = reset_control_assert(res->axi_m_sticky_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dev_err(dev, "cannot assert axi master sticky reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ret = reset_control_assert(res->pwr_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dev_err(dev, "cannot assert power reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ret = reset_control_assert(res->ahb_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dev_err(dev, "cannot assert ahb reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) usleep_range(10000, 12000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ret = reset_control_deassert(res->phy_ahb_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dev_err(dev, "cannot deassert phy ahb reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ret = reset_control_deassert(res->phy_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) dev_err(dev, "cannot deassert phy reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) goto err_rst_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ret = reset_control_deassert(res->pipe_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dev_err(dev, "cannot deassert pipe reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) goto err_rst_pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) ret = reset_control_deassert(res->pipe_sticky_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) dev_err(dev, "cannot deassert pipe sticky reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) goto err_rst_pipe_sticky;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) usleep_range(10000, 12000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ret = reset_control_deassert(res->axi_m_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dev_err(dev, "cannot deassert axi master reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) goto err_rst_axi_m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = reset_control_deassert(res->axi_m_sticky_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dev_err(dev, "cannot deassert axi master sticky reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto err_rst_axi_m_sticky;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ret = reset_control_deassert(res->axi_s_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_err(dev, "cannot deassert axi slave reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) goto err_rst_axi_s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ret = reset_control_deassert(res->pwr_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) dev_err(dev, "cannot deassert power reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) goto err_rst_pwr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ret = reset_control_deassert(res->ahb_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) dev_err(dev, "cannot deassert ahb reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) goto err_rst_ahb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) usleep_range(10000, 12000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) goto err_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* power up the PHY: clear the PHY test power-down bit (BIT(0)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) val &= ~BIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* change DBI base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* MAC PHY_POWERDOWN MUX DISABLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) val &= ~BIT(29);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
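^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /* bypass MHI; BIT(4) is the BYPASS bit of this register (name per later kernel headers) */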
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) val |= BIT(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
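^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * Enable AXI master write halting via BIT(31); the 2_7_0 variant below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * applies the same setting only when MSI support is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) */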
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) val |= BIT(31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) err_clks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) reset_control_assert(res->ahb_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) err_rst_ahb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) reset_control_assert(res->pwr_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) err_rst_pwr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) reset_control_assert(res->axi_s_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) err_rst_axi_s:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) reset_control_assert(res->axi_m_sticky_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) err_rst_axi_m_sticky:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) reset_control_assert(res->axi_m_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) err_rst_axi_m:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) reset_control_assert(res->pipe_sticky_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) err_rst_pipe_sticky:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) reset_control_assert(res->pipe_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) err_rst_pipe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) reset_control_assert(res->phy_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) err_rst_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) reset_control_assert(res->phy_ahb_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct dw_pcie *pci = pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) static const char * const rst_names[] = { "axi_m", "axi_s", "pipe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) "axi_m_sticky", "sticky",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) "ahb", "sleep", };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) res->iface = devm_clk_get(dev, "iface");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (IS_ERR(res->iface))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return PTR_ERR(res->iface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) res->axi_m_clk = devm_clk_get(dev, "axi_m");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (IS_ERR(res->axi_m_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return PTR_ERR(res->axi_m_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) res->axi_s_clk = devm_clk_get(dev, "axi_s");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (IS_ERR(res->axi_s_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return PTR_ERR(res->axi_s_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) res->ahb_clk = devm_clk_get(dev, "ahb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (IS_ERR(res->ahb_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return PTR_ERR(res->ahb_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) res->aux_clk = devm_clk_get(dev, "aux");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (IS_ERR(res->aux_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return PTR_ERR(res->aux_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) res->rst[i] = devm_reset_control_get_exclusive(dev, rst_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (IS_ERR(res->rst[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return PTR_ERR(res->rst[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) clk_disable_unprepare(res->iface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) clk_disable_unprepare(res->axi_m_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) clk_disable_unprepare(res->axi_s_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) clk_disable_unprepare(res->ahb_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) clk_disable_unprepare(res->aux_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct dw_pcie *pci = pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ret = reset_control_assert(res->rst[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
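^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* keep the resets asserted while they propagate */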
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) usleep_range(2000, 2500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) ret = reset_control_deassert(res->rst[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) dev_err(dev, "reset #%d deassert failed (%d)\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * There is no way to check whether the reset has completed, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * just wait for some time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) usleep_range(2000, 2500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) ret = clk_prepare_enable(res->iface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) dev_err(dev, "cannot prepare/enable iface clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) goto err_clk_iface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ret = clk_prepare_enable(res->axi_m_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) dev_err(dev, "cannot prepare/enable axi master clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) goto err_clk_axi_m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ret = clk_prepare_enable(res->axi_s_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) dev_err(dev, "cannot prepare/enable axi slave clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto err_clk_axi_s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ret = clk_prepare_enable(res->ahb_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) dev_err(dev, "cannot prepare/enable ahb clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) goto err_clk_ahb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret = clk_prepare_enable(res->aux_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) dev_err(dev, "cannot prepare/enable aux clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) goto err_clk_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
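^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /* set the PARF slave address space size */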
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) writel(SLV_ADDR_SPACE_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
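^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* power up the PHY: clear the PHY test power-down bit (BIT(0)) */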
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) val &= ~BIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | AUX_PWR_DET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) pcie->parf + PCIE20_PARF_SYS_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
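^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * DBI setup: enable bus mastering, allow writes to read-only DBI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * registers (DBI_RO_WR_EN) and program the slot capabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */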
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
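^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* do not advertise any ASPM support */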
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) val &= ~PCI_EXP_LNKCAP_ASPMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
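^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /* disable the completion timeout */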
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) PCI_EXP_DEVCTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) err_clk_aux:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) clk_disable_unprepare(res->ahb_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) err_clk_ahb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) clk_disable_unprepare(res->axi_s_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) err_clk_axi_s:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) clk_disable_unprepare(res->axi_m_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) err_clk_axi_m:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) clk_disable_unprepare(res->iface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) err_clk_iface:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * Failures are not checked here; the original failure already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * captured in 'ret' is returned anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) for (i = 0; i < ARRAY_SIZE(res->rst); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) reset_control_assert(res->rst[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct dw_pcie *pci = pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (IS_ERR(res->pci_reset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return PTR_ERR(res->pci_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) res->supplies[0].supply = "vdda";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) res->supplies[1].supply = "vddpe-3v3";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) res->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) res->clks[0].id = "aux";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) res->clks[1].id = "cfg";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) res->clks[2].id = "bus_master";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) res->clks[3].id = "bus_slave";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) res->clks[4].id = "slave_q2a";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) res->clks[5].id = "tbu";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
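^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * The pipe clock is typically sourced from the PHY, so it is kept out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * of the bulk set and enabled in post_init, after phy_power_on().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) */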
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) res->pipe_clk = devm_clk_get(dev, "pipe");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return PTR_ERR_OR_ZERO(res->pipe_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct dw_pcie *pci = pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) dev_err(dev, "cannot enable regulators\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) goto err_disable_regulators;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ret = reset_control_assert(res->pci_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) dev_err(dev, "cannot assert pci reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) goto err_disable_clocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) usleep_range(1000, 1500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ret = reset_control_deassert(res->pci_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) dev_err(dev, "cannot deassert pci reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) goto err_disable_clocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* configure PCIe to RC mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* power up the PHY: clear the PHY test power-down bit (BIT(0)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) val &= ~BIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* change DBI base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /* MAC PHY_POWERDOWN MUX DISABLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) val &= ~BIT(29);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) val |= BIT(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
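^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * Enable AXI master write halting (BIT(31)), presumably needed for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * MSI delivery, hence the CONFIG_PCI_MSI gate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) */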
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (IS_ENABLED(CONFIG_PCI_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) val |= BIT(31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) err_disable_clocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) err_disable_regulators:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return clk_prepare_enable(res->pipe_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) clk_disable_unprepare(res->pipe_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static int qcom_pcie_link_up(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
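^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /* the link is up once the Data Link Layer Link Active bit is set */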
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return !!(val & PCI_EXP_LNKSTA_DLLLA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static int qcom_pcie_host_init(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct qcom_pcie *pcie = to_qcom_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
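^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /* hold the endpoint in reset while the controller and PHY are brought up */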
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) qcom_ep_reset_assert(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ret = pcie->ops->init(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ret = phy_power_on(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) goto err_deinit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (pcie->ops->post_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ret = pcie->ops->post_init(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) goto err_disable_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) dw_pcie_setup_rc(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dw_pcie_msi_init(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) qcom_ep_reset_deassert(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ret = qcom_pcie_establish_link(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) qcom_ep_reset_assert(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (pcie->ops->post_deinit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) pcie->ops->post_deinit(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) err_disable_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) phy_power_off(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) err_deinit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) pcie->ops->deinit(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .host_init = qcom_pcie_host_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static const struct qcom_pcie_ops ops_2_1_0 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) .get_resources = qcom_pcie_get_resources_2_1_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .init = qcom_pcie_init_2_1_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .deinit = qcom_pcie_deinit_2_1_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static const struct qcom_pcie_ops ops_1_0_0 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) .get_resources = qcom_pcie_get_resources_1_0_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) .init = qcom_pcie_init_1_0_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .deinit = qcom_pcie_deinit_1_0_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static const struct qcom_pcie_ops ops_2_3_2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .get_resources = qcom_pcie_get_resources_2_3_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .init = qcom_pcie_init_2_3_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) .post_init = qcom_pcie_post_init_2_3_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) .deinit = qcom_pcie_deinit_2_3_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) .post_deinit = qcom_pcie_post_deinit_2_3_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static const struct qcom_pcie_ops ops_2_4_0 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) .get_resources = qcom_pcie_get_resources_2_4_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) .init = qcom_pcie_init_2_4_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) .deinit = qcom_pcie_deinit_2_4_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static const struct qcom_pcie_ops ops_2_3_3 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) .get_resources = qcom_pcie_get_resources_2_3_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .init = qcom_pcie_init_2_3_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .deinit = qcom_pcie_deinit_2_3_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static const struct qcom_pcie_ops ops_2_7_0 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) .get_resources = qcom_pcie_get_resources_2_7_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) .init = qcom_pcie_init_2_7_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) .deinit = qcom_pcie_deinit_2_7_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) .post_init = qcom_pcie_post_init_2_7_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) .post_deinit = qcom_pcie_post_deinit_2_7_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static const struct dw_pcie_ops dw_pcie_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) .link_up = qcom_pcie_link_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) static int qcom_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct pcie_port *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct dw_pcie *pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct qcom_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (!pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) pci->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) pci->ops = &dw_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) pp = &pci->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) pcie->pci = pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) pcie->ops = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
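^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* request the optional PERST# line, initially driven to its active (reset) state */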
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (IS_ERR(pcie->reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ret = PTR_ERR(pcie->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (IS_ERR(pcie->parf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ret = PTR_ERR(pcie->parf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (IS_ERR(pci->dbi_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) ret = PTR_ERR(pci->dbi_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (IS_ERR(pcie->elbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = PTR_ERR(pcie->elbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) pcie->phy = devm_phy_optional_get(dev, "pciephy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (IS_ERR(pcie->phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ret = PTR_ERR(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ret = pcie->ops->get_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) pp->ops = &qcom_pcie_dw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (IS_ENABLED(CONFIG_PCI_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) pp->msi_irq = platform_get_irq_byname(pdev, "msi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (pp->msi_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) ret = pp->msi_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ret = phy_init(pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) platform_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ret = dw_pcie_host_init(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dev_err(dev, "cannot initialize host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) goto err_pm_runtime_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) err_pm_runtime_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static const struct of_device_id qcom_pcie_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) { .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) { .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) { .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
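^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * The root ports on these SoCs report an incorrect device class; force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * them to be treated as PCI-to-PCI bridges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) */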
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) static void qcom_fixup_class(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) dev->class = PCI_CLASS_BRIDGE_PCI << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static struct platform_driver qcom_pcie_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .probe = qcom_pcie_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .name = "qcom-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) .of_match_table = qcom_pcie_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) builtin_platform_driver(qcom_pcie_driver);