// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Rockchip SoCs
 *
 * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
 *		http://www.rock-chips.com
 *
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/phy/pcie.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/rfkill-wlan.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <linux/pci-epf.h>

#include "pcie-designware.h"
#include "../../pci.h"
#include "../rockchip-pcie-dma.h"
#include "pcie-dw-dmatest.h"

enum rk_pcie_device_mode {
	RK_PCIE_EP_TYPE,
	RK_PCIE_RC_TYPE,
};

#define RK_PCIE_DBG			0

#define PCIE_DMA_OFFSET			0x380000

#define PCIE_DMA_CTRL_OFF		0x8
#define PCIE_DMA_WR_ENB			0xc
#define PCIE_DMA_WR_CTRL_LO		0x200
#define PCIE_DMA_WR_CTRL_HI		0x204
#define PCIE_DMA_WR_XFERSIZE		0x208
#define PCIE_DMA_WR_SAR_PTR_LO		0x20c
#define PCIE_DMA_WR_SAR_PTR_HI		0x210
#define PCIE_DMA_WR_DAR_PTR_LO		0x214
#define PCIE_DMA_WR_DAR_PTR_HI		0x218
#define PCIE_DMA_WR_WEILO		0x18
#define PCIE_DMA_WR_WEIHI		0x1c
#define PCIE_DMA_WR_DOORBELL		0x10
#define PCIE_DMA_WR_INT_STATUS		0x4c
#define PCIE_DMA_WR_INT_MASK		0x54
#define PCIE_DMA_WR_INT_CLEAR		0x58

#define PCIE_DMA_RD_ENB			0x2c
#define PCIE_DMA_RD_CTRL_LO		0x300
#define PCIE_DMA_RD_CTRL_HI		0x304
#define PCIE_DMA_RD_XFERSIZE		0x308
#define PCIE_DMA_RD_SAR_PTR_LO		0x30c
#define PCIE_DMA_RD_SAR_PTR_HI		0x310
#define PCIE_DMA_RD_DAR_PTR_LO		0x314
#define PCIE_DMA_RD_DAR_PTR_HI		0x318
#define PCIE_DMA_RD_WEILO		0x38
#define PCIE_DMA_RD_WEIHI		0x3c
#define PCIE_DMA_RD_DOORBELL		0x30
#define PCIE_DMA_RD_INT_STATUS		0xa0
#define PCIE_DMA_RD_INT_MASK		0xa8
#define PCIE_DMA_RD_INT_CLEAR		0xac

#define PCIE_DMA_CHANEL_MAX_NUM		2

/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_IATU_MIN		9000
#define LINK_WAIT_IATU_MAX		10000

#define PCIE_DIRECT_SPEED_CHANGE	(0x1 << 17)

#define PCIE_TYPE0_STATUS_COMMAND_REG	0x4
#define PCIE_TYPE0_BAR0_REG		0x10

#define PCIE_CAP_LINK_CONTROL2_LINK_STATUS	0xa0

#define PCIE_CLIENT_INTR_STATUS_MSG_RX	0x04
#define PME_TO_ACK			(BIT(9) | BIT(25))
#define PCIE_CLIENT_INTR_STATUS_LEGACY	0x08
#define PCIE_CLIENT_INTR_STATUS_MISC	0x10
#define PCIE_CLIENT_INTR_MASK_LEGACY	0x1c
#define UNMASK_ALL_LEGACY_INT		0xffff0000
#define MASK_LEGACY_INT(x)		(0x00110011 << x)
#define UNMASK_LEGACY_INT(x)		(0x00110000 << x)
#define PCIE_CLIENT_INTR_MASK		0x24
#define PCIE_CLIENT_POWER		0x2c
#define READY_ENTER_L23			BIT(3)
#define PCIE_CLIENT_MSG_GEN		0x34
#define PME_TURN_OFF			(BIT(4) | BIT(20))
#define PCIE_CLIENT_GENERAL_DEBUG	0x104
#define PCIE_CLIENT_HOT_RESET_CTRL	0x180
#define PCIE_LTSSM_ENABLE_ENHANCE	BIT(4)
#define PCIE_CLIENT_LTSSM_STATUS	0x300
#define SMLH_LINKUP			BIT(16)
#define RDLH_LINKUP			BIT(17)
#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN	0x154
#define PCIE_CLIENT_DBG_FIFO_MODE_CON	0x310
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0	0x320
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1	0x324
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0	0x328
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1	0x32c
#define PCIE_CLIENT_DBG_FIFO_STATUS	0x350
#define PCIE_CLIENT_DBG_TRANSITION_DATA	0xffff0000
#define PCIE_CLIENT_DBF_EN		0xffff0007

#define PCIE_PHY_LINKUP			BIT(0)
#define PCIE_DATA_LINKUP		BIT(1)

#define PCIE_TYPE0_HDR_DBI2_OFFSET	0x100000
#define PCIE_SB_BAR0_MASK_REG		0x100010

#define PCIE_PL_ORDER_RULE_CTRL_OFF	0x8b4
#define RK_PCIE_L2_TMOUT_US		5000

enum rk_pcie_ltssm_code {
	S_L0 = 0x11,
	S_L0S = 0x12,
	S_L1_IDLE = 0x14,
	S_L2_IDLE = 0x15,
	S_MAX = 0x1f,
};

struct rk_pcie {
	struct dw_pcie			*pci;
	enum rk_pcie_device_mode	mode;
	enum phy_mode			phy_mode;
	int				phy_sub_mode;
	unsigned char			bar_to_atu[6];
	phys_addr_t			*outbound_addr;
	unsigned long			*ib_window_map;
	unsigned long			*ob_window_map;
	unsigned int			num_ib_windows;
	unsigned int			num_ob_windows;
	void __iomem			*dbi_base;
	void __iomem			*apb_base;
	struct phy			*phy;
	struct clk_bulk_data		*clks;
	struct reset_control		*rsts;
	unsigned int			clk_cnt;
	struct gpio_desc		*rst_gpio;
	u32				perst_inactive_ms;
	struct gpio_desc		*prsnt_gpio;
	phys_addr_t			mem_start;
	size_t				mem_size;
	struct pcie_port		pp;
	struct regmap			*usb_pcie_grf;
	struct regmap			*pmu_grf;
	struct dma_trx_obj		*dma_obj;
	bool				in_suspend;
	bool				skip_scan_in_resume;
	bool				is_rk1808;
	bool				is_signal_test;
	bool				bifurcation;
	bool				supports_clkreq;
	struct regulator		*vpcie3v3;
	struct irq_domain		*irq_domain;
	int				legacy_parent_irq;
	raw_spinlock_t			intx_lock;
	u16				aspm;
	u32				l1ss_ctl1;
	struct dentry			*debugfs;
};

struct rk_pcie_of_data {
	enum rk_pcie_device_mode mode;
};

#define to_rk_pcie(x)	dev_get_drvdata((x)->dev)

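/*
 * Sized MMIO accessors in the style of dw_pcie_read()/dw_pcie_write():
 * accesses must be naturally aligned and 1, 2 or 4 bytes wide, otherwise
 * PCIBIOS_BAD_REGISTER_NUMBER is returned.
 */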
static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int rk_pcie_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

static u32 __rk_pcie_read_apb(struct rk_pcie *rk_pcie, void __iomem *base,
			      u32 reg, size_t size)
{
	int ret;
	u32 val;

	ret = rk_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(rk_pcie->pci->dev, "Read APB address failed\n");

	return val;
}

static void __rk_pcie_write_apb(struct rk_pcie *rk_pcie, void __iomem *base,
				u32 reg, size_t size, u32 val)
{
	int ret;

	ret = rk_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(rk_pcie->pci->dev, "Write APB address failed\n");
}

static inline u32 rk_pcie_readl_apb(struct rk_pcie *rk_pcie, u32 reg)
{
	return __rk_pcie_read_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4);
}

static inline void rk_pcie_writel_apb(struct rk_pcie *rk_pcie, u32 reg,
				      u32 val)
{
	__rk_pcie_write_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4, val);
}

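/*
 * DWC controllers that lack the iATU viewport use the "unrolled" iATU
 * register space instead; on those, reading PCIE_ATU_VIEWPORT returns all
 * ones. Same detection trick as dw_pcie_iatu_unroll_enabled().
 */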
static u8 rk_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
		return;
	}

	ret = dw_pcie_write(pci->atu_base + reg, 4, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}

static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	rk_pcie_writel_atu(pci, offset + reg, val);
}

static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);

	ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}

static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return rk_pcie_readl_atu(pci, offset + reg);
}

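/*
 * Program an inbound iATU region in BAR-match mode via the unrolled
 * register layout, then poll the region control register until the enable
 * bit reads back as set, so the translation is in effect before anything
 * relies on it.
 */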
static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					   int index, int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
				 PCIE_ATU_FUNC_NUM(func_no));
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_FUNC_NUM_MATCH_EN |
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				    int bar, u64 cpu_addr,
				    enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
			   PCIE_ATU_FUNC_NUM_MATCH_EN |
			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

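/*
 * Map a BAR onto a local address through a free inbound window. When
 * restoring state after suspend (in_suspend set), the window previously
 * recorded in bar_to_atu is reprogrammed instead of allocating a new one.
 */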
static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie,
				  enum pci_barno bar, dma_addr_t cpu_addr,
				  enum dw_pcie_as_type as_type)
{
	int ret;
	u32 free_win;
	u8 func_no = 0x0;

	if (rk_pcie->in_suspend) {
		free_win = rk_pcie->bar_to_atu[bar];
	} else {
		free_win = find_first_zero_bit(rk_pcie->ib_window_map,
					       rk_pcie->num_ib_windows);
		if (free_win >= rk_pcie->num_ib_windows) {
			dev_err(rk_pcie->pci->dev, "No free inbound window\n");
			return -EINVAL;
		}
	}

	ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar,
				       cpu_addr, as_type);
	if (ret < 0) {
		dev_err(rk_pcie->pci->dev, "Failed to program IB window\n");
		return ret;
	}

	if (rk_pcie->in_suspend)
		return 0;

	rk_pcie->bar_to_atu[bar] = free_win;
	set_bit(free_win, rk_pcie->ib_window_map);

	return 0;
}

static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	rk_pcie_writel_atu(pci, offset + reg, val);
}

static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return rk_pcie_readl_atu(pci, offset + reg);
}

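/*
 * Outbound counterpart of the inbound helpers above: translate the CPU
 * address window [cpu_addr, cpu_addr + size - 1] to a PCI bus address
 * using the unrolled register layout.
 */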
static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					     int index, int type,
					     u64 cpu_addr, u64 pci_addr,
					     u32 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
				 lower_32_bits(limit_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
				 upper_32_bits(limit_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type | PCIE_ATU_FUNC_NUM(func_no));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
				      int type, u64 cpu_addr, u64 pci_addr,
				      u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type,
						 cpu_addr, pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(0x0));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie,
				   phys_addr_t phys_addr, u64 pci_addr,
				   size_t size)
{
	u32 free_win;

	if (rk_pcie->in_suspend) {
		free_win = find_first_bit(rk_pcie->ob_window_map,
					  rk_pcie->num_ob_windows);
	} else {
		free_win = find_first_zero_bit(rk_pcie->ob_window_map,
					       rk_pcie->num_ob_windows);
		if (free_win >= rk_pcie->num_ob_windows) {
			dev_err(rk_pcie->pci->dev, "No free outbound window\n");
			return -EINVAL;
		}
	}

	rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
				  phys_addr, pci_addr, size);

	if (rk_pcie->in_suspend)
		return 0;

	set_bit(free_win, rk_pcie->ob_window_map);
	rk_pcie->outbound_addr[free_win] = phys_addr;

	return 0;
}

static void __rk_pcie_ep_reset_bar(struct rk_pcie *rk_pcie,
				   enum pci_barno bar, int flags)
{
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_writel_dbi(rk_pcie->pci, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0x0);
}

static void rk_pcie_ep_reset_bar(struct rk_pcie *rk_pcie, enum pci_barno bar)
{
	__rk_pcie_ep_reset_bar(rk_pcie, bar, 0);
}

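/*
 * Default EP address translation: BAR0 is mapped inbound to the local
 * memory region starting at mem_start, and one outbound window
 * identity-maps the first 2 GiB of PCI bus address space (both CPU and
 * PCI addresses fixed at 0x0).
 */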
static int rk_pcie_ep_atu_init(struct rk_pcie *rk_pcie)
{
	int ret;
	enum pci_barno bar;
	enum dw_pcie_as_type as_type;
	dma_addr_t cpu_addr;
	phys_addr_t phys_addr;
	u64 pci_addr;
	size_t size;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		rk_pcie_ep_reset_bar(rk_pcie, bar);

	cpu_addr = rk_pcie->mem_start;
	as_type = DW_PCIE_AS_MEM;
	ret = rk_pcie_ep_inbound_atu(rk_pcie, BAR_0, cpu_addr, as_type);
	if (ret)
		return ret;

	phys_addr = 0x0;
	pci_addr = 0x0;
	size = SZ_2G;
	ret = rk_pcie_ep_outbound_atu(rk_pcie, phys_addr, pci_addr, size);
	if (ret)
		return ret;

	return 0;
}

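/*
 * Without CLKREQ# wired up, the L1 substates cannot work reliably, so mask
 * the L1SS bits out of the advertised capability to keep the ASPM core
 * from enabling them.
 */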
#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l1ss(struct rk_pcie *rk_pcie)
{
	u32 val, cfg_link_cap_l1sub;

	val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_L1SS);
	if (!val) {
		dev_err(rk_pcie->pci->dev, "can't find l1ss cap\n");
		return;
	}

	cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	val = dw_pcie_readl_dbi(rk_pcie->pci, cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2 |
		 PCI_L1SS_CAP_L1_PM_SS);
	dw_pcie_writel_dbi(rk_pcie->pci, cfg_link_cap_l1sub, val);
}
#else
static inline void disable_aspm_l1ss(struct rk_pcie *rk_pcie) { }
#endif

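/*
 * The PCIE_CLIENT registers use high-word-masked writes: the upper 16 bits
 * of the value select which of the lower 16 bits get updated. The writes
 * to register 0x0 (general control) below set the device type field to
 * EP (0x0) or RC (0x4) without disturbing the neighbouring bits.
 */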
static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie)
{
	switch (rk_pcie->mode) {
	case RK_PCIE_EP_TYPE:
		rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000);
		break;
	case RK_PCIE_RC_TYPE:
		if (rk_pcie->supports_clkreq) {
			/* Application is ready to have reference clock removed */
			rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x00010001);
		} else {
			/*
			 * Pull CLKREQ# low to assert the output enable of the
			 * connected clock generator.
			 */
			rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x30011000);
			disable_aspm_l1ss(rk_pcie);
		}
		rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040);
		/*
		 * Disable the ordering rule so that completions cannot be
		 * blocked behind a halted posted queue; the producer-consumer
		 * ordering model still needs to be checked. RK1808 only.
		 */
		if (rk_pcie->is_rk1808)
			dw_pcie_writel_dbi(rk_pcie->pci,
					   PCIE_PL_ORDER_RULE_CTRL_OFF,
					   0xff00);
		break;
	}
}

static inline void rk_pcie_link_status_clear(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG, 0x0);
}

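/*
 * The LTSSM enable control also lives in the general control register
 * (0x0); these high-word-masked writes toggle only the link training
 * enable bits.
 */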
static inline void rk_pcie_disable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc0008);
}

static inline void rk_pcie_enable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc000c);
}

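/*
 * The link is only reported up when both SMLH (PHY link) and RDLH (data
 * link layer) are up; RK1808 exposes the equivalent status through the
 * general debug register instead.
 */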
static int rk_pcie_link_up(struct dw_pcie *pci)
{
	struct rk_pcie *rk_pcie = to_rk_pcie(pci);
	u32 val;

	if (rk_pcie->is_rk1808) {
		val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG);
		if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3)
			return 1;
	} else {
		val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
		if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000)
			return 1;
	}

	return 0;
}

static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return;
	if (rk_pcie->is_rk1808)
		return;

	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
			   PCIE_CLIENT_DBF_EN);
}

static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	u32 loop;
	struct dw_pcie *pci = rk_pcie->pci;

	dev_info(pci->dev, "ltssm = 0x%x\n",
		 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
	for (loop = 0; loop < 64; loop++)
		dev_info(pci->dev, "fifo_status = 0x%x\n",
			 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_STATUS));
#endif
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) static int rk_pcie_establish_link(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) int retries, power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct rk_pcie *rk_pcie = to_rk_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * For standard RC, even if the link has been setup by firmware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * we still need to reset link as we need to remove all resource info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * from devices, for instance BAR, as it wasn't assigned by kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (dw_pcie_link_up(pci) && !std_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) dev_err(pci->dev, "link is already up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* Rest the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) rk_pcie_disable_ltssm(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) rk_pcie_link_status_clear(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) rk_pcie_enable_debug(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* Enable client reset or link down interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0x40000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /* Enable LTSSM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) rk_pcie_enable_ltssm(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * In resume routine, function devices' resume function must be late after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * controllers'. Some devices, such as Wi-Fi, need special IO setting before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * finishing training. So there must be timeout here. These kinds of devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * need rescan devices by its driver when used. So no need to waste time waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * for training pass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) rfkill_get_wifi_power_state(&power);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!power) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * PCIe requires the refclk to be stable for 100µs prior to releasing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * PERST and T_PVPERL (Power stable to PERST# inactive) should be a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * Card Electromechanical Specification 3.0. So 100ms in total is the min
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * requuirement here. We add a 200ms by default for sake of hoping everthings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * work fine. If it doesn't, please add more in DT node by add rockchip,perst-inactive-ms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) msleep(rk_pcie->perst_inactive_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
	/*
	 * Add this 1ms delay: we observe that the link is reliably up after it,
	 * and it saves about 20ms when scanning devices.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) usleep_range(1000, 1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) for (retries = 0; retries < 100; retries++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (dw_pcie_link_up(pci)) {
			/*
			 * We may get here with the link in L0 at Gen1. If the EP
			 * is capable of Gen2 or Gen3, a speed change may happen
			 * right now, and we would otherwise keep accessing
			 * devices over an unstable link. Given that the LTSSM
			 * timeout is at most 24ms per period, wait a bit longer
			 * for the speed change to settle.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) msleep(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) rk_pcie_debug_dump(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) rk_pcie_debug_dump(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
	dev_err(pci->dev, "PCIe link failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
	return rk_pcie->is_signal_test ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
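/*
 * The embedded DMA engine is considered present when the eDMA control
 * register in DBI space reads back nonzero (per the DWC layout it encodes
 * the available read/write channel counts).
 */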
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static bool rk_pcie_udma_enabled(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) PCIE_DMA_CTRL_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
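/*
 * Probe the DMA transfer object and the dmatest helper, then unmask the
 * client and core DMA interrupts. Returns 0 immediately when the
 * controller has no embedded DMA engine.
 */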
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int rk_pcie_init_dma_trx(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!rk_pcie_udma_enabled(rk_pcie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (IS_ERR(rk_pcie->dma_obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (IS_ERR(rk_pcie->dma_obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) dev_err(rk_pcie->pci->dev, "failed to prepare dmatest\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Enable client write and read interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* Enable core write interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* Enable core read interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
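/*
 * Walk the extended capability list in the local configuration space (via
 * DBI) and return the offset of the Resizable BAR capability, or 0 if it
 * is absent. Open-coded counterpart of pci_find_ext_capability() for use
 * before a struct pci_dev exists.
 */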
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int pos = PCI_CFG_SPACE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) int cap = PCI_EXT_CAP_ID_REBAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /* minimum 8 bytes per capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * If we have no capabilities, this is indicated by cap ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * cap version and next pointer all being 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (header == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) pos = PCI_EXT_CAP_NEXT(header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (pos < PCI_CFG_SPACE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
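/*
 * When built as a module, provide a local copy of dw_pcie_write_dbi2(),
 * presumably because the DWC core does not export that symbol for module
 * builds.
 */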
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) #ifdef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (pci->ops && pci->ops->write_dbi2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) dev_err(pci->dev, "write DBI address failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
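/*
 * Program the flag bits of an endpoint BAR. For a 64-bit BAR, the upper
 * half is first cleared through the shadow (DBI2) space so the two
 * registers form a 64-bit pair.
 */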
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static int rk_pcie_ep_set_bar_flag(struct rk_pcie *rk_pcie, enum pci_barno barno, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * barno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
	/* Disable the upper 32-bit BAR to form a 64-bit BAR pair */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) dw_pcie_writel_dbi2(rk_pcie->pci, reg + 4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) dw_pcie_writel_dbi(rk_pcie->pci, reg, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
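/*
 * One-time endpoint configuration: unmask the DMA interrupts, program the
 * link width and speed-change bits from the "num-lanes" DT property,
 * enable bus mastering and memory space, resize and flag the BARs, and
 * set the device ID and class code the host will see.
 */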
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) u32 lanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int resbar_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* Enable client write and read interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* Enable core write interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* Enable core read interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ret = of_property_read_u32(np, "num-lanes", &lanes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) lanes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* Set the number of lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) val = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_PORT_LINK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) val &= ~PORT_LINK_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) switch (lanes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) val |= PORT_LINK_MODE_1_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) val |= PORT_LINK_MODE_2_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) val |= PORT_LINK_MODE_4_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) val |= PORT_LINK_MODE_8_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) dev_err(dev, "num-lanes %u: invalid value\n", lanes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_PORT_LINK_CONTROL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /* Set link width speed control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) val = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) switch (lanes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) val |= PCIE_DIRECT_SPEED_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* Enable bus master and memory space */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG,
			   PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) resbar_base = rk_pci_find_resbar_capability(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!resbar_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dev_warn(dev, "failed to find resbar_base\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) } else {
		/* Resize BAR0 to 512GB, BAR1 to 8MB, and BAR2~5 to 64MB each */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0xc, 0xfffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x10, 0x3c0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (bar = 2; bar < 6; bar++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* Set flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) rk_pcie_ep_set_bar_flag(rk_pcie, BAR_0, PCI_BASE_ADDRESS_MEM_TYPE_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rk_pcie_ep_set_bar_flag(rk_pcie, BAR_1, PCI_BASE_ADDRESS_MEM_TYPE_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) rk_pcie_ep_set_bar_flag(rk_pcie, BAR_2, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) rk_pcie_ep_set_bar_flag(rk_pcie, BAR_4, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
	/* The device ID and class code are needed for BAR address assignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* Set shadow BAR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (rk_pcie->is_rk1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) val = rk_pcie->mem_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
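/*
 * Read the "num-ib-windows"/"num-ob-windows" iATU counts from the DT,
 * validate them, and allocate the window bitmaps plus the outbound
 * address table.
 */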
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ret = of_property_read_u32(np, "num-ib-windows",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) &rk_pcie->num_ib_windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dev_err(dev, "unable to read *num-ib-windows* property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (rk_pcie->num_ib_windows > MAX_IATU_IN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) dev_err(dev, "Invalid *num-ib-windows*\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ret = of_property_read_u32(np, "num-ob-windows",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) &rk_pcie->num_ob_windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dev_err(dev, "Unable to read *num-ob-windows* property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (rk_pcie->num_ob_windows > MAX_IATU_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) dev_err(dev, "Invalid *num-ob-windows*\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) rk_pcie->ib_window_map = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) BITS_TO_LONGS(rk_pcie->num_ib_windows),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) sizeof(long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (!rk_pcie->ib_window_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) rk_pcie->ob_window_map = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) BITS_TO_LONGS(rk_pcie->num_ob_windows),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) sizeof(long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (!rk_pcie->ob_window_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) addr = devm_kcalloc(dev, rk_pcie->num_ob_windows, sizeof(phys_addr_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) rk_pcie->outbound_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
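/*
 * Stub that makes the DWC core skip its built-in MSI initialization;
 * installed when no "msi" interrupt is available and MSIs are handled out
 * of band.
 */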
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static int rk_pcie_msi_host_init(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static int rk_pcie_host_init(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup_rc(pp);

	return rk_pcie_establish_link(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static struct dw_pcie_host_ops rk_pcie_host_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .host_init = rk_pcie_host_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
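/*
 * Register the root-complex side with the DWC host core, falling back to
 * the out-of-band MSI stub when the "msi" interrupt is missing.
 */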
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static int rk_add_pcie_port(struct rk_pcie *rk_pcie, struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct dw_pcie *pci = rk_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct pcie_port *pp = &pci->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (IS_ENABLED(CONFIG_PCI_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		/* If msi_irq is invalid, fall back to the out-of-band MSI routine */
		if (pp->msi_irq < 0) {
			dev_info(dev, "using out-of-band MSI support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) rk_pcie_host_ops.msi_host_init = rk_pcie_msi_host_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) pp->ops = &rk_pcie_host_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ret = dw_pcie_host_init(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) dev_err(dev, "failed to initialize host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
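/*
 * Bring the controller up in endpoint mode: take the backing memory from
 * "memory-region", parse the iATU windows, initialize the ATU and
 * endpoint registers, start link training, and finally probe the DMA
 * object when the embedded DMA engine is present.
 */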
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int rk_pcie_add_ep(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct device_node *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct resource reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) mem = of_parse_phandle(np, "memory-region", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) dev_err(dev, "missing \"memory-region\" property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
	ret = of_address_to_resource(mem, 0, &reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) dev_err(dev, "missing \"reg\" property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) rk_pcie->mem_start = reg.start;
	rk_pcie->mem_size = resource_size(&reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ret = rk_pcie_ep_win_parse(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) dev_err(dev, "failed to parse ep dts\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) rk_pcie->pci->dbi_base2 = rk_pcie->pci->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = rk_pcie_ep_atu_init(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) dev_err(dev, "failed to init ep device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) rk_pcie_ep_setup(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ret = rk_pcie_establish_link(rk_pcie->pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) dev_err(dev, "failed to establish pcie link\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (!rk_pcie_udma_enabled(rk_pcie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) rk_pcie->dma_obj = rk_pcie_dma_obj_probe(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (IS_ERR(rk_pcie->dma_obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dev_err(dev, "failed to prepare dma object\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
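/* Get and enable all clocks described in the controller's DT node. */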
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) static int rk_pcie_clk_init(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) rk_pcie->clk_cnt = devm_clk_bulk_get_all(dev, &rk_pcie->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (rk_pcie->clk_cnt < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (ret) {
		dev_err(dev, "failed to prepare and enable pcie bulk clks: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
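/*
 * Map the "pcie-dbi" and "pcie-apb" register windows and claim the
 * optional PERST# reset and presence-detect GPIOs, with PERST# driven low
 * so the device starts out held in reset.
 */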
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int rk_pcie_resource_get(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct resource *dbi_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct resource *apb_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) "pcie-dbi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (!dbi_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) dev_err(&pdev->dev, "get pcie-dbi failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) rk_pcie->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (IS_ERR(rk_pcie->dbi_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return PTR_ERR(rk_pcie->dbi_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) rk_pcie->pci->dbi_base = rk_pcie->dbi_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) apb_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) "pcie-apb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!apb_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) dev_err(&pdev->dev, "get pcie-apb failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rk_pcie->apb_base = devm_ioremap_resource(&pdev->dev, apb_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (IS_ERR(rk_pcie->apb_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return PTR_ERR(rk_pcie->apb_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
	/*
	 * Reset the device before enabling power because some platforms
	 * feed the external refclk input from a 100MHz OSC chip hooked to
	 * the same power rail as the slot. On those boards the refclk is
	 * available as soon as the slot power comes up, which doesn't
	 * quite follow the spec, so make sure the device is held in reset
	 * until everything is ready.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) rk_pcie->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (IS_ERR(rk_pcie->rst_gpio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) dev_err(&pdev->dev, "invalid reset-gpios property in node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return PTR_ERR(rk_pcie->rst_gpio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (device_property_read_u32(&pdev->dev, "rockchip,perst-inactive-ms",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) &rk_pcie->perst_inactive_ms))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) rk_pcie->perst_inactive_ms = 200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) rk_pcie->prsnt_gpio = devm_gpiod_get_optional(&pdev->dev, "prsnt", GPIOD_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (IS_ERR_OR_NULL(rk_pcie->prsnt_gpio))
		dev_info(&pdev->dev, "no usable prsnt-gpios property in node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
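/*
 * Look up the PHY, select the RC or EP submode (plus bifurcation when
 * requested), then initialize and power it on.
 */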
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static int rk_pcie_phy_init(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) rk_pcie->phy = devm_phy_get(dev, "pcie-phy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (IS_ERR(rk_pcie->phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) dev_info(dev, "missing phy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return PTR_ERR(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) switch (rk_pcie->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) case RK_PCIE_RC_TYPE:
		rk_pcie->phy_mode = PHY_MODE_PCIE; /* nominal; the submode is what matters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) case RK_PCIE_EP_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) rk_pcie->phy_mode = PHY_MODE_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) rk_pcie->phy_sub_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) dev_err(dev, "fail to set phy to mode %s, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
	if (rk_pcie->bifurcation) {
		ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
				       PHY_MODE_PCIE_BIFURCATION);
		if (ret) {
			dev_err(dev, "fail to set phy to bifurcation mode, err %d\n",
				ret);
			return ret;
		}
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) ret = phy_init(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) dev_err(dev, "fail to init phy, err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) phy_power_on(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
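/*
 * Set or clear the link-reset grant bit in the USB/PCIe GRF. Rockchip GRF
 * registers carry a write mask in their upper half, hence bit 18 enables
 * the write to bit 2.
 */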
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static int rk_pcie_reset_grant_ctrl(struct rk_pcie *rk_pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
	u32 val = (0x1 << 18); /* Write mask bit */

	if (enable)
		val |= (0x1 << 2);

	return regmap_write(rk_pcie->usb_pcie_grf, 0x0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
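/*
 * Kick one transfer on a read channel: program the channel context
 * (control, transfer size, source and destination pointers) and ring the
 * read doorbell.
 */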
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) static void rk_pcie_start_dma_rd(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct rk_pcie *rk_pcie = dev_get_drvdata(obj->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) cur->enb.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_CTRL_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) cur->ctx_reg.ctrllo.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_CTRL_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) cur->ctx_reg.ctrlhi.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_XFERSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) cur->ctx_reg.xfersize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_SAR_PTR_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) cur->ctx_reg.sarptrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_SAR_PTR_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) cur->ctx_reg.sarptrhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_DAR_PTR_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) cur->ctx_reg.darptrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_DAR_PTR_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) cur->ctx_reg.darptrhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) cur->start.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
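/*
 * Write-channel counterpart of rk_pcie_start_dma_rd(); additionally
 * programs the channel weight register before ringing the doorbell.
 */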
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static void rk_pcie_start_dma_wr(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct rk_pcie *rk_pcie = dev_get_drvdata(obj->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) cur->enb.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_CTRL_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) cur->ctx_reg.ctrllo.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_CTRL_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) cur->ctx_reg.ctrlhi.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_XFERSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) cur->ctx_reg.xfersize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_SAR_PTR_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) cur->ctx_reg.sarptrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_SAR_PTR_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) cur->ctx_reg.sarptrhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_DAR_PTR_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) cur->ctx_reg.darptrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_DAR_PTR_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) cur->ctx_reg.darptrhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_WEILO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) cur->weilo.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) cur->start.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
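/* Dispatch a prepared table to the read or write engine of its channel. */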
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static void rk_pcie_start_dma_dwc(struct dma_trx_obj *obj, struct dma_table *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) int dir = table->dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int chn = table->chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) int ctr_off = PCIE_DMA_OFFSET + chn * 0x200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (dir == DMA_FROM_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) rk_pcie_start_dma_rd(obj, table, ctr_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) else if (dir == DMA_TO_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) rk_pcie_start_dma_wr(obj, table, ctr_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
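/*
 * Fill a DMA table for a single transfer with a local completion
 * interrupt: DMA_FROM_BUS copies from the remote bus address into local
 * memory, DMA_TO_BUS the other way around.
 */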
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static void rk_pcie_config_dma_dwc(struct dma_table *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) table->enb.enb = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) table->ctx_reg.ctrllo.lie = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) table->ctx_reg.ctrllo.rie = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) table->ctx_reg.ctrllo.td = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) table->ctx_reg.ctrlhi.asdword = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) table->ctx_reg.xfersize = table->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (table->dir == DMA_FROM_BUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) table->ctx_reg.sarptrlo = (u32)(table->bus & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) table->ctx_reg.sarptrhi = (u32)(table->bus >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) table->ctx_reg.darptrlo = (u32)(table->local & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) table->ctx_reg.darptrhi = (u32)(table->local >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) } else if (table->dir == DMA_TO_BUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) table->ctx_reg.sarptrlo = (u32)(table->local & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) table->ctx_reg.sarptrhi = (u32)(table->local >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) table->ctx_reg.darptrlo = (u32)(table->bus & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) table->ctx_reg.darptrhi = (u32)(table->bus >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) table->weilo.weight0 = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) table->start.stop = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) table->start.chnl = table->chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
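/*
 * Shared "sys" interrupt handler: clear per-channel DMA write/read done
 * and abort status (invoking the registered callback on completion), and
 * reprogram the command register when misc status bit 2, which appears to
 * signal a hot reset or link-down event, fires.
 */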
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static irqreturn_t rk_pcie_sys_irq_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct rk_pcie *rk_pcie = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) u32 chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) union int_status status;
	union int_clear clears = { .asdword = 0 };	/* don't write garbage in unused bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) u32 reg, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) PCIE_DMA_WR_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (status.donesta & BIT(chn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) clears.doneclr = 0x1 << chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) PCIE_DMA_WR_INT_CLEAR, clears.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (rk_pcie->dma_obj && rk_pcie->dma_obj->cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) rk_pcie->dma_obj->cb(rk_pcie->dma_obj, chn, DMA_TO_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (status.abortsta & BIT(chn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) dev_err(rk_pcie->pci->dev, "%s, abort\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) clears.abortclr = 0x1 << chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) PCIE_DMA_WR_INT_CLEAR, clears.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) PCIE_DMA_RD_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (status.donesta & BIT(chn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) clears.doneclr = 0x1 << chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) PCIE_DMA_RD_INT_CLEAR, clears.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (rk_pcie->dma_obj && rk_pcie->dma_obj->cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) rk_pcie->dma_obj->cb(rk_pcie->dma_obj, chn, DMA_FROM_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (status.abortsta & BIT(chn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) dev_err(rk_pcie->pci->dev, "%s, abort\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) clears.abortclr = 0x1 << chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) PCIE_DMA_RD_INT_CLEAR, clears.asdword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) reg = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (reg & BIT(2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /* Setup command register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) val &= 0xffff0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
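/* Request the shared "sys" interrupt that serves DMA and misc events. */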
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static int rk_pcie_request_sys_irq(struct rk_pcie *rk_pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) irq = platform_get_irq_byname(pdev, "sys");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) dev_err(rk_pcie->pci->dev, "missing sys IRQ resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) ret = devm_request_irq(rk_pcie->pci->dev, irq, rk_pcie_sys_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) IRQF_SHARED, "pcie-sys", rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) dev_err(rk_pcie->pci->dev, "failed to request PCIe subsystem IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static const struct rk_pcie_of_data rk_pcie_rc_of_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) .mode = RK_PCIE_RC_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static const struct rk_pcie_of_data rk_pcie_ep_of_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) .mode = RK_PCIE_EP_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static const struct of_device_id rk_pcie_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) .compatible = "rockchip,rk1808-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) .data = &rk_pcie_rc_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) .compatible = "rockchip,rk1808-pcie-ep",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) .data = &rk_pcie_ep_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) .compatible = "rockchip,rk3568-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) .data = &rk_pcie_rc_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) .compatible = "rockchip,rk3568-pcie-ep",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) .data = &rk_pcie_ep_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) .compatible = "rockchip,rk3588-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) .data = &rk_pcie_rc_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) .compatible = "rockchip,rk3588-pcie-ep",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) .data = &rk_pcie_ep_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) MODULE_DEVICE_TABLE(of, rk_pcie_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) static const struct dw_pcie_ops dw_pcie_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) .start_link = rk_pcie_establish_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) .link_up = rk_pcie_link_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
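/*
 * RK1808-specific GRF setup: look up the USB/PCIe and PMU syscons, switch
 * the reset pin over to PCIe_PRSTNm0, and release the link reset grant.
 */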
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static int rk1808_pcie_fixup(struct rk_pcie *rk_pcie, struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) rk_pcie->usb_pcie_grf = syscon_regmap_lookup_by_phandle(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) "rockchip,usbpciegrf");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (IS_ERR(rk_pcie->usb_pcie_grf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dev_err(dev, "failed to find usb_pcie_grf regmap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return PTR_ERR(rk_pcie->usb_pcie_grf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) rk_pcie->pmu_grf = syscon_regmap_lookup_by_phandle(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) "rockchip,pmugrf");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (IS_ERR(rk_pcie->pmu_grf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) dev_err(dev, "failed to find pmugrf regmap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return PTR_ERR(rk_pcie->pmu_grf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /* Workaround for pcie, switch to PCIe_PRSTNm0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ret = regmap_write(rk_pcie->pmu_grf, 0x100, 0x01000100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) ret = regmap_write(rk_pcie->pmu_grf, 0x0, 0x0c000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
	/* release link reset grant */
	return rk_pcie_reset_grant_ctrl(rk_pcie, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
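/*
 * Select the enhanced LTSSM-enable control mode via the write-masked
 * hot-reset control register.
 */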
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) static void rk_pcie_fast_link_setup(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /* LTSSM EN ctrl mode */
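	/*
	 * Rockchip APB control registers use a hiword-mask layout: the top
	 * 16 bits act as a write-enable mask for the corresponding low bits,
	 * which is why the value is also OR'ed in shifted left by 16.
	 */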
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static void rk_pcie_legacy_irq_mask(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) struct rk_pcie *rk_pcie = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) raw_spin_lock_irqsave(&rk_pcie->intx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) MASK_LEGACY_INT(d->hwirq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) raw_spin_unlock_irqrestore(&rk_pcie->intx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static void rk_pcie_legacy_irq_unmask(struct irq_data *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct rk_pcie *rk_pcie = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) raw_spin_lock_irqsave(&rk_pcie->intx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) UNMASK_LEGACY_INT(d->hwirq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) raw_spin_unlock_irqrestore(&rk_pcie->intx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
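/*
 * All four INTx lines are funneled through a single parent interrupt, so
 * changing the affinity of one virtual INTx can only be implemented by
 * moving the chained parent IRQ; the effective affinity is then reported
 * back on the INTx itself.
 */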
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static int rk_pcie_irq_set_affinity(struct irq_data *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) const struct cpumask *mask_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) struct rk_pcie *priv = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (!force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) cpu = cpumask_any_and(mask_val, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) cpu = cpumask_first(mask_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (cpu >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) #if defined(MODULE) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) irq_set_affinity_hint(priv->legacy_parent_irq, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) irq_set_affinity(priv->legacy_parent_irq, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) irq_data_update_effective_affinity(d, cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return IRQ_SET_MASK_OK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) static struct irq_chip rk_pcie_legacy_irq_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) .name = "rk-pcie-legacy-int",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) .irq_enable = rk_pcie_legacy_irq_unmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) .irq_disable = rk_pcie_legacy_irq_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) .irq_mask = rk_pcie_legacy_irq_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) .irq_unmask = rk_pcie_legacy_irq_unmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) .irq_set_affinity = rk_pcie_irq_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) static int rk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) irq_hw_number_t hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_simple_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) irq_set_chip_data(irq, domain->host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static const struct irq_domain_ops intx_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) .map = rk_pcie_intx_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
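/*
 * Chained handler for the legacy INTx parent interrupt: read the pending
 * bits from PCIE_CLIENT_INTR_STATUS_LEGACY (INTA..INTD in bits [3:0]) and
 * dispatch each one through the linear IRQ domain created in
 * rk_pcie_init_irq_domain().
 */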
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static void rk_pcie_legacy_int_handler(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct irq_chip *chip = irq_desc_get_chip(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) struct rk_pcie *rockchip = irq_desc_get_handler_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) struct device *dev = rockchip->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) u32 hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) u32 virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) chained_irq_enter(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) reg = rk_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) reg = reg & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) while (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) hwirq = ffs(reg) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) reg &= ~BIT(hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) virq = irq_find_mapping(rockchip->irq_domain, hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (virq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) generic_handle_irq(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) chained_irq_exit(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
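/*
 * The INTx domain hangs off the first child node of the controller, which
 * is expected to be an interrupt-controller node. A minimal illustrative
 * DT fragment (node name and property values are an assumption, not taken
 * from a specific board file):
 *
 *	pcie@fe260000 {
 *		...
 *		legacy-interrupt-controller {
 *			interrupt-controller;
 *			#address-cells = <0>;
 *			#interrupt-cells = <1>;
 *		};
 *	};
 */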
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) static int rk_pcie_init_irq_domain(struct rk_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct device *dev = rockchip->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) struct device_node *intc = of_get_next_child(dev->of_node, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (!intc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) dev_err(dev, "missing child interrupt-controller node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) raw_spin_lock_init(&rockchip->intx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) &intx_domain_ops, rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (!rockchip->irq_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
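/*
 * vpcie3v3 is optional: when devm_regulator_get_optional() fails, the
 * ERR_PTR is kept in rk_pcie->vpcie3v3 and the two helpers below simply
 * treat it as "no regulator to manage" and return success.
 */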
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static int rk_pcie_enable_power(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (IS_ERR(rk_pcie->vpcie3v3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) ret = regulator_enable(rk_pcie->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) dev_err(dev, "fail to enable vpcie3v3 regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) static int rk_pcie_disable_power(struct rk_pcie *rk_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct device *dev = rk_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (IS_ERR(rk_pcie->vpcie3v3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) ret = regulator_disable(rk_pcie->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) dev_err(dev, "fail to disable vpcie3v3 regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
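/*
 * The RAS DES event counters sit in the DesignWare vendor-specific
 * extended capability. A write to the Event Counter Control register
 * (cap + 0x08) selects group/event in bits [27:16] and drives the enable
 * field in bits [4:2] (0x7 = enable all, 0x5 = disable all) and the clear
 * field in bits [1:0] (0x3 = clear all); the selected count is then read
 * from the Event Counter Data register (cap + 0x0c). The field layout
 * follows the DWC databook; treat the exact encodings as an assumption if
 * your controller revision differs.
 */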
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) #define RAS_DES_EVENT(ss, v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) dw_pcie_writel_dbi(pcie->pci, cap_base + 8, v); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) seq_printf(s, ss "0x%x\n", dw_pcie_readl_dbi(pcie->pci, cap_base + 0xc)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
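/*
 * debugfs "err_event" read path: decode the common RASDES status word
 * (bit 6 = link training, bit 5 = L1.2, bit 4 = L1.1, bit 3 = L1,
 * bit 2 = L0, bits [1:0] = RX/TX L0s) and then dump every event counter.
 */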
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static int rockchip_pcie_rasdes_show(struct seq_file *s, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct rk_pcie *pcie = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) int cap_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) u32 val = rk_pcie_readl_apb(pcie, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) char *pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (val & BIT(6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) pm = "In training";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) else if (val & BIT(5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) pm = "L1.2";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) else if (val & BIT(4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) pm = "L1.1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) else if (val & BIT(3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) pm = "L1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) else if (val & BIT(2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) pm = "L0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) else if (val & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) pm = "Invalid";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
	seq_printf(s, "Common event signal status: %s\n", pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (!cap_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) RAS_DES_EVENT("EBUF Overflow: ", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) RAS_DES_EVENT("EBUF Under-run: ", 0x0010000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) RAS_DES_EVENT("Decode Error: ", 0x0020000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) RAS_DES_EVENT("Running Disparity Error: ", 0x0030000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) RAS_DES_EVENT("SKP OS Parity Error: ", 0x0040000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) RAS_DES_EVENT("SYNC Header Error: ", 0x0050000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) RAS_DES_EVENT("CTL SKP OS Parity Error: ", 0x0060000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) RAS_DES_EVENT("Detect EI Infer: ", 0x1050000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) RAS_DES_EVENT("Receiver Error: ", 0x1060000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) RAS_DES_EVENT("Rx Recovery Request: ", 0x1070000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) RAS_DES_EVENT("N_FTS Timeout: ", 0x1080000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) RAS_DES_EVENT("Framing Error: ", 0x1090000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) RAS_DES_EVENT("Deskew Error: ", 0x10a0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) RAS_DES_EVENT("BAD TLP: ", 0x2000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) RAS_DES_EVENT("LCRC Error: ", 0x2010000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) RAS_DES_EVENT("BAD DLLP: ", 0x2020000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) RAS_DES_EVENT("Replay Number Rollover: ", 0x2030000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) RAS_DES_EVENT("Replay Timeout: ", 0x2040000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) RAS_DES_EVENT("Rx Nak DLLP: ", 0x2050000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) RAS_DES_EVENT("Tx Nak DLLP: ", 0x2060000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) RAS_DES_EVENT("Retry TLP: ", 0x2070000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) RAS_DES_EVENT("FC Timeout: ", 0x3000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) RAS_DES_EVENT("Poisoned TLP: ", 0x3010000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) RAS_DES_EVENT("ECRC Error: ", 0x3020000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) RAS_DES_EVENT("Unsupported Request: ", 0x3030000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) RAS_DES_EVENT("Completer Abort: ", 0x3040000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) RAS_DES_EVENT("Completion Timeout: ", 0x3050000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }

static int rockchip_pcie_rasdes_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) return single_open(file, rockchip_pcie_rasdes_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) inode->i_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static ssize_t rockchip_pcie_rasdes_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) struct seq_file *s = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct rk_pcie *pcie = s->private;
	char buf[32] = { 0 };
	int cap_base;

	/* Leave at least one trailing NUL so the strncmp()s below are safe */
	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (!cap_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (!strncmp(buf, "enable", 6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) dev_info(pcie->pci->dev, "RAS DES Event: Enable ALL!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x1c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) } else if (!strncmp(buf, "disable", 7)) {
		dev_info(pcie->pci->dev, "RAS DES Event: Disable ALL!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) } else if (!strncmp(buf, "clear", 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) dev_info(pcie->pci->dev, "RAS DES Event: Clear ALL!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) } else {
		dev_info(pcie->pci->dev, "Unsupported command!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
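/*
 * Example usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug (the directory name comes from dev_name() of the
 * controller; "fe260000.pcie" below is only an illustration):
 *
 *	echo enable > /sys/kernel/debug/fe260000.pcie/err_event
 *	cat /sys/kernel/debug/fe260000.pcie/err_event
 *	echo clear > /sys/kernel/debug/fe260000.pcie/err_event
 */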
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static const struct file_operations rockchip_pcie_rasdes_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) .open = rockchip_pcie_rasdes_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) .write = rockchip_pcie_rasdes_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static int rockchip_pcie_fifo_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct rk_pcie *pcie = (struct rk_pcie *)dev_get_drvdata(s->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) u32 loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) seq_printf(s, "ltssm = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) rk_pcie_readl_apb(pcie, PCIE_CLIENT_LTSSM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) for (loop = 0; loop < 64; loop++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) seq_printf(s, "fifo_status = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) rk_pcie_readl_apb(pcie, PCIE_CLIENT_DBG_FIFO_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static void rockchip_pcie_debugfs_exit(struct rk_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) debugfs_remove_recursive(pcie->debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) pcie->debugfs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) static int rockchip_pcie_debugfs_init(struct rk_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct dentry *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) pcie->debugfs = debugfs_create_dir(dev_name(pcie->pci->dev), NULL);
	if (IS_ERR(pcie->debugfs))
		return PTR_ERR(pcie->debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) debugfs_create_devm_seqfile(pcie->pci->dev, "dumpfifo",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) pcie->debugfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) rockchip_pcie_fifo_show);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) file = debugfs_create_file("err_event", 0644, pcie->debugfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) pcie, &rockchip_pcie_rasdes_ops);
	if (IS_ERR(file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) goto remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) remove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) rockchip_pcie_debugfs_exit(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static int rk_pcie_really_probe(void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct platform_device *pdev = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) struct rk_pcie *rk_pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct dw_pcie *pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) const struct rk_pcie_of_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) enum rk_pcie_device_mode mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) match = of_match_device(rk_pcie_of_match, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (!match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) goto release_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) data = (struct rk_pcie_of_data *)match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) mode = (enum rk_pcie_device_mode)data->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) rk_pcie = devm_kzalloc(dev, sizeof(*rk_pcie), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (!rk_pcie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) goto release_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (!pci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) goto release_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) pci->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) pci->ops = &dw_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) rk_pcie->mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) rk_pcie->pci = pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (of_device_is_compatible(np, "rockchip,rk1808-pcie") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) of_device_is_compatible(np, "rockchip,rk1808-pcie-ep"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) rk_pcie->is_rk1808 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) rk_pcie->is_rk1808 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (device_property_read_bool(dev, "rockchip,bifurcation"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) rk_pcie->bifurcation = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) ret = rk_pcie_resource_get(pdev, rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) dev_err(dev, "resource init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) goto release_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
	if (!IS_ERR_OR_NULL(rk_pcie->prsnt_gpio) &&
	    !gpiod_get_value(rk_pcie->prsnt_gpio)) {
		ret = -ENODEV;
		goto release_driver;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) rk_pcie->supports_clkreq = device_property_read_bool(dev, "supports-clkreq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) retry_regulator:
	/* DON'T MOVE ME: the regulator must be enabled before PHY init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) rk_pcie->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (IS_ERR(rk_pcie->vpcie3v3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
				/* Defer, but stay in this thread, for at most 10 s (500 * 20 ms) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (++val < 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) goto retry_regulator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) ret = PTR_ERR(rk_pcie->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) goto release_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) dev_info(dev, "no vpcie3v3 regulator found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) ret = rk_pcie_enable_power(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) goto release_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) ret = rk_pcie_phy_init(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) dev_err(dev, "phy init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) goto disable_vpcie3v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) rk_pcie->rsts = devm_reset_control_array_get_exclusive(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (IS_ERR(rk_pcie->rsts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) ret = PTR_ERR(rk_pcie->rsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) dev_err(dev, "failed to get reset lines\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) goto disable_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) reset_control_deassert(rk_pcie->rsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) ret = rk_pcie_request_sys_irq(rk_pcie, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) dev_err(dev, "pcie irq init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) goto disable_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) platform_set_drvdata(pdev, rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) ret = rk_pcie_clk_init(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) dev_err(dev, "clock init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) goto disable_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) dw_pcie_dbi_ro_wr_en(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (rk_pcie->is_rk1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) ret = rk1808_pcie_fixup(rk_pcie, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) goto deinit_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) rk_pcie_fast_link_setup(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /* Legacy interrupt is optional */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) ret = rk_pcie_init_irq_domain(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) irq = platform_get_irq_byname(pdev, "legacy");
		if (irq >= 0) {
			rk_pcie->legacy_parent_irq = irq;
			irq_set_chained_handler_and_data(irq, rk_pcie_legacy_int_handler,
							 rk_pcie);
			/* Unmask all legacy interrupts, INTA..INTD */
			rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
					   UNMASK_ALL_LEGACY_INT);
		} else {
			dev_info(dev, "missing legacy IRQ resource\n");
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /* Set PCIe mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) rk_pcie_set_mode(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) /* Force into loopback master mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (device_property_read_bool(dev, "rockchip,lpbk-master")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) val |= PORT_LINK_LPBK_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) rk_pcie->is_signal_test = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /* Force into compliance mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) if (device_property_read_bool(dev, "rockchip,compliance-mode")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) val |= BIT(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) rk_pcie->is_signal_test = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) /* Skip waiting for training to pass in system PM routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (device_property_read_bool(dev, "rockchip,skip-scan-in-resume"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) rk_pcie->skip_scan_in_resume = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) switch (rk_pcie->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) case RK_PCIE_RC_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) ret = rk_add_pcie_port(rk_pcie, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) case RK_PCIE_EP_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) ret = rk_pcie_add_ep(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
	if (rk_pcie->is_signal_test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) goto remove_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) ret = rk_pcie_init_dma_trx(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) dev_err(dev, "failed to add dma extension\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (rk_pcie->dma_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) rk_pcie->dma_obj->start_dma_func = rk_pcie_start_dma_dwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) rk_pcie->dma_obj->config_dma_func = rk_pcie_config_dma_dwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (rk_pcie->is_rk1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /* hold link reset grant after link-up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) goto remove_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) dw_pcie_dbi_ro_wr_dis(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) device_init_wakeup(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
	/* Enable async system PM for multi-port SoCs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) device_enable_async_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (IS_ENABLED(CONFIG_DEBUG_FS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) ret = rockchip_pcie_debugfs_init(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) dev_err(dev, "failed to setup debugfs: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) /* Enable RASDES Error event by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_VNDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (!val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) dev_err(dev, "Not able to find RASDES CAP!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) dw_pcie_writel_dbi(rk_pcie->pci, val + 8, 0x1c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) dw_pcie_writel_dbi(rk_pcie->pci, val + 8, 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) remove_irq_domain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (rk_pcie->irq_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) irq_domain_remove(rk_pcie->irq_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) disable_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) phy_power_off(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) phy_exit(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) deinit_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) disable_vpcie3v3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) rk_pcie_disable_power(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) release_driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) device_release_driver(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
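/*
 * With CONFIG_PCIE_RK_THREADED_INIT the real probe runs in a kthread so
 * that slow paths (e.g. the up-to-10 s vpcie3v3 retry loop) do not stall
 * the probing of other devices; on failure the thread detaches the driver
 * via device_release_driver() instead of returning an error to the core.
 */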
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) static int rk_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct task_struct *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (IS_ERR(tsk)) {
			dev_err(&pdev->dev, "failed to start rk-pcie thread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return PTR_ERR(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return rk_pcie_really_probe(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
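/*
 * Called from the system PM prepare (and matching complete) hooks. With
 * enable == false the Root Port's L1SS and ASPM state is saved and ASPM
 * is disabled; with enable == true the saved state is restored. In both
 * cases the downstream device in slot 0 is brought back to D0 and its
 * link state updated accordingly.
 */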
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) static void rk_pcie_downstream_dev_to_d0(struct rk_pcie *rk_pcie, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) struct pcie_port *pp = &rk_pcie->pci->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) struct pci_bus *child, *root_bus = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) struct pci_dev *pdev, *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Find the bus directly below the Root Port and its bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (child->parent == pp->bridge->bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) root_bus = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) bridge = root_bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (!root_bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) dev_err(rk_pcie->pci->dev, "Failed to find downstream devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) /* Save and restore root bus ASPM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (rk_pcie->l1ss_ctl1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) dw_pcie_writel_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1, rk_pcie->l1ss_ctl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
		/* rk_pcie->aspm was saved in advance by the enable == false path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, rk_pcie->aspm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (val & PCI_L1SS_CTL1_L1SS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) rk_pcie->l1ss_ctl1 = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) rk_pcie->l1ss_ctl1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) rk_pcie->aspm = val & PCI_EXP_LNKCTL_ASPMC;
		/* Clear the LNKCTL ASPM Control field to disable ASPM */
		val &= ~PCI_EXP_LNKCTL_ASPMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) list_for_each_entry(pdev, &root_bus->devices, bus_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (PCI_SLOT(pdev->devfn) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (pci_set_power_state(pdev, PCI_D0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) dev_err(rk_pcie->pci->dev,
					"Failed to transition %s to D0 state\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (rk_pcie->l1ss_ctl1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) val &= ~PCI_L1SS_CTL1_L1SS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) val |= (rk_pcie->l1ss_ctl1 & PCI_L1SS_CTL1_L1SS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) PCI_EXP_LNKCTL_ASPMC, rk_pcie->aspm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) int ret = 0, power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) struct dw_pcie *pci = rk_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /*
	 * This is as per PCI Express Base Specification r5.0 v1.0, May 22 2019,
	 * Section 5.2 Link State Power Management (page 440).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * L2/L3 Ready entry negotiations happen while in the L0 state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * L2/L3 Ready are entered only after the negotiation completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * The following example sequence illustrates the multi-step Link state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * transition process leading up to entering a system sleep state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * 1. System software directs all Functions of a Downstream component to D3Hot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * 2. The Downstream component then initiates the transition of the Link to L1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * as required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * 3. System software then causes the Root Complex to broadcast the PME_Turn_Off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * Message in preparation for removing the main power source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * 4. This Message causes the subject Link to transition back to L0 in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) * send it and to enable the Downstream component to respond with PME_TO_Ack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * 5. After sending the PME_TO_Ack, the Downstream component initiates the L2/L3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * Ready transition protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /* 1. All sub-devices are in D3hot by PCIe stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) rk_pcie_link_status_clear(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
	/*
	 * WLAN devices have already been shut down by their function driver
	 * at this point, so the L2 handshake below would fail. Skip the L2
	 * routine in that case.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (rk_pcie->skip_scan_in_resume) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) rfkill_get_wifi_power_state(&power);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (!power)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) goto no_l2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) /* 2. Broadcast PME_Turn_Off Message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_MSG_GEN, PME_TURN_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_MSG_GEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) status, !(status & BIT(4)), 20, RK_PCIE_L2_TMOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) dev_err(dev, "Failed to send PME_Turn_Off\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) goto no_l2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) /* 3. Wait for PME_TO_Ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_INTR_STATUS_MSG_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) status, status & BIT(9), 20, RK_PCIE_L2_TMOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) dev_err(dev, "Failed to receive PME_TO_Ack\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) goto no_l2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) /* 4. Clear PME_TO_Ack and Wait for ready to enter L23 message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MSG_RX, PME_TO_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_POWER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) status, status & READY_ENTER_L23, 20, RK_PCIE_L2_TMOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (ret) {
		dev_err(dev, "Failed to get ready to enter L23\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) goto no_l2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) /* 5. Check we are in L2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) status, ((status & S_MAX) == S_L2_IDLE), 20, RK_PCIE_L2_TMOUT_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) dev_err(pci->dev, "Link isn't in L2 idle!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) no_l2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) rk_pcie_disable_ltssm(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
	/* Make sure the PHY assert has taken effect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) usleep_range(200, 300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) phy_power_off(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) phy_exit(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) rk_pcie->in_suspend = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) ret = rk_pcie_disable_power(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) static int __maybe_unused rockchip_dw_pcie_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) reset_control_assert(rk_pcie->rsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) reset_control_deassert(rk_pcie->rsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) ret = rk_pcie_enable_power(rk_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) rk_pcie->phy_sub_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) dev_err(dev, "fail to set phy to mode %s, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) ret = phy_init(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) dev_err(dev, "fail to init phy, err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) phy_power_on(rk_pcie->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
	dw_pcie_dbi_ro_wr_en(rk_pcie->pci);

	if (rk_pcie->is_rk1808) {
		/* release link reset grant */
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, true);
		if (ret)
			return ret;
	} else {
		rk_pcie_fast_link_setup(rk_pcie);
	}

	/* Set PCIe mode */
	rk_pcie_set_mode(rk_pcie);

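	/*
	 * For a standard RC the generic DesignWare host setup restores
	 * everything the root port lost over suspend; the EP/DMA paths
	 * do their own ATU restore further down.
	 */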
	if (std_rc)
		dw_pcie_setup_rc(&rk_pcie->pci->pp);

	ret = rk_pcie_establish_link(rk_pcie->pci);
	if (ret) {
		dev_err(dev, "failed to establish pcie link\n");
		goto err;
	}

	if (std_rc)
		goto std_rc_done;

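	/*
	 * EP mode (or RC with the private DMA object): re-program the
	 * address translation (ATU) windows and the endpoint setup,
	 * which were presumably lost while the controller was powered
	 * down.
	 */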
	ret = rk_pcie_ep_atu_init(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to init ep device\n");
		goto err;
	}

	rk_pcie_ep_setup(rk_pcie);

	rk_pcie->in_suspend = false;

std_rc_done:
	dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
	/* hold link reset grant after link-up */
	if (rk_pcie->is_rk1808) {
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
		if (ret)
			goto err;
	}

	return 0;
err:
	rk_pcie_disable_power(rk_pcie);

	return ret;
}

static int rockchip_dw_pcie_prepare(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
	u32 val;

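	/*
	 * ->prepare() runs at the start of the suspend transition while
	 * devices are still functional: if the link has left L0, wake
	 * the downstream device back to D0, presumably so the config
	 * accesses issued during suspend still reach it.
	 */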
	val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
	if ((val & S_MAX) != S_L0) {
		dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
		rk_pcie_downstream_dev_to_d0(rk_pcie, false);
		dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
	}

	return 0;
}

static void rockchip_dw_pcie_complete(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);

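	/*
	 * Counterpart of ->prepare(): with the system fully resumed,
	 * return the downstream device to D0 unconditionally.
	 */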
	dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
	rk_pcie_downstream_dev_to_d0(rk_pcie, true);
	dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
}

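/*
 * Suspend/resume are registered as noirq callbacks: the link is torn
 * down only after the downstream drivers' regular suspend hooks have
 * run, and is brought back before their resume hooks run, while
 * ->prepare()/->complete() bracket the whole transition for the D0
 * handling above.
 */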
static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = {
	.prepare = rockchip_dw_pcie_prepare,
	.complete = rockchip_dw_pcie_complete,
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend,
				      rockchip_dw_pcie_resume)
};

static struct platform_driver rk_plat_pcie_driver = {
	.driver = {
		.name = "rk-pcie",
		.of_match_table = rk_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &rockchip_dw_pcie_pm_ops,
	},
	.probe = rk_pcie_probe,
};

module_platform_driver(rk_plat_pcie_driver);

MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip PCIe controller driver");
MODULE_LICENSE("GPL v2");