^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Rockchip AXI PCIe host controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2016 Rockchip, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author: Shawn Lin <shawn.lin@rock-chips.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Wenrui Li <wenrui.li@rock-chips.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Bits taken from Synopsys DesignWare Host controller driver and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * ARM PCI Host generic driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/bitrev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/gpio/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/irqchip/chained_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/mfd/syscon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/of_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/pci_ids.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "pcie-rockchip.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include "rockchip-pcie-dma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) static void rk_pcie_start_dma_rk3399(struct dma_trx_obj *obj, struct dma_table *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) struct dma_table *tbl = cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) int chn = tbl->chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) rockchip_pcie_write(rockchip, (u32)(tbl->phys_descs & 0xffffffff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) rockchip_pcie_write(rockchip, (u32)(tbl->phys_descs >> 32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x08);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) rockchip_pcie_write(rockchip, BIT(0) | (tbl->dir << 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) static void rk_pcie_config_dma_rk3399(struct dma_table *table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) u32 *desc = table->descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) *(desc + 0) = (u32)(table->local & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) *(desc + 1) = (u32)(table->local >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) *(desc + 2) = (u32)(table->bus & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) *(desc + 3) = (u32)(table->bus >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *(desc + 4) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) *(desc + 5) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) *(desc + 6) = table->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) *(desc + 7) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) *(desc + 8) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) *(desc + 6) |= 1 << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /* Update Tx credit maximum update interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) struct pci_bus *bus, int dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * Access only one slot on each root port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * Do not read more than one device on the bus directly attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * to RC's downstream side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) if (pci_is_root_bus(bus) || pci_is_root_bus(bus->parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) return dev == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) u8 map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (rockchip->legacy_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) return GENMASK(MAX_LANE_NUM - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) map = val & PCIE_CORE_LANE_MAP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) /* The link may be using a reverse-indexed mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) if (val & PCIE_CORE_LANE_MAP_REVERSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) map = bitrev8(map) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) int where, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (!IS_ALIGNED((uintptr_t)addr, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (size == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) *val = readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) } else if (size == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) *val = readw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) } else if (size == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) *val = readb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) int where, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) u32 mask, tmp, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) offset = where & ~0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) if (size == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) writel(val, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * N.B. This read/modify/write isn't safe in general because it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * corrupt RW1C bits in adjacent registers. But the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * doesn't support smaller writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) tmp = readl(addr) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) tmp |= val << ((where & 0x3) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) writel(tmp, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) struct pci_bus *bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) int where, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) u32 busdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) if (rockchip->in_remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) PCI_FUNC(devfn), where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) if (!IS_ALIGNED(busdev, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) if (pci_is_root_bus(bus->parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) rockchip_pcie_cfg_configuration_accesses(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) AXI_WRAPPER_TYPE0_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) rockchip_pcie_cfg_configuration_accesses(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) AXI_WRAPPER_TYPE1_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (size == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) *val = readl(rockchip->reg_base + busdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) } else if (size == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) *val = readw(rockchip->reg_base + busdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) } else if (size == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) *val = readb(rockchip->reg_base + busdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct pci_bus *bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int where, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) u32 busdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) if (rockchip->in_remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) PCI_FUNC(devfn), where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (!IS_ALIGNED(busdev, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (pci_is_root_bus(bus->parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) rockchip_pcie_cfg_configuration_accesses(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) AXI_WRAPPER_TYPE0_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) rockchip_pcie_cfg_configuration_accesses(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) AXI_WRAPPER_TYPE1_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (size == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) writel(val, rockchip->reg_base + busdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) else if (size == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) writew(val, rockchip->reg_base + busdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) else if (size == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) writeb(val, rockchip->reg_base + busdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return PCIBIOS_BAD_REGISTER_NUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) struct rockchip_pcie *rockchip = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) *val = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (pci_is_root_bus(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) int where, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) struct rockchip_pcie *rockchip = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) if (pci_is_root_bus(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
/* Config-space accessors registered with the PCI core for this host */
static struct pci_ops rockchip_pcie_ops = {
	.read = rockchip_pcie_rd_conf,
	.write = rockchip_pcie_wr_conf,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) int curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) u32 status, scale, power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (IS_ERR(rockchip->vpcie3v3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * Set RC's captured slot power limit and scale if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * vpcie3v3 available. The default values are both zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * which means the software should set these two according
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * to the actual power supply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) curr = regulator_get_current_limit(rockchip->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) if (curr <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) scale = 3; /* 0.001x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) curr = curr / 1000; /* convert to mA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) power = (curr * 3300) / 1000; /* milliwatt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if (!scale) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) dev_warn(rockchip->dev, "invalid power supply\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) scale--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) power = power / 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * rockchip_pcie_host_init_port - Initialize hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * @rockchip: PCIe port information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) int err, i = MAX_LANE_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) int timeouts = 500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) err = rockchip_pcie_init_port(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) /* Fix the transmitted FTS count desired to exit from L0s. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) rockchip_pcie_set_power_limit(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /* Set RC's clock architecture as common clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) status |= PCI_EXP_LNKSTA_SLC << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /* Set RC's RCB to 128 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) status |= PCI_EXP_LNKCTL_RCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) /* Enable Gen1 training */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) PCIE_CLIENT_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (rockchip->wait_ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) timeouts = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /* 500ms timeout value should be enough for Gen1/2 training */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) status, PCIE_LINK_UP(status), 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) timeouts * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) dev_err(dev, "PCIe link training gen1 timeout!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) goto err_power_off_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) status, PCIE_LINK_IS_L0(status), 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) timeouts * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) dev_err(dev, "LTSSM is not L0!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if (rockchip->link_gen == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * Enable retrain for gen2. This should be configured only after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * gen1 finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) status |= PCI_EXP_LNKCTL_RL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) status, PCIE_LINK_IS_GEN2(status), 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 500 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /* Check the final link width from negotiated lane counter from MGMT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) PCIE_CORE_PL_CONF_LANE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) dev_dbg(dev, "current link width is x%d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /* Power off unused lane(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) for (i = 0; i < MAX_LANE_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) if (!(rockchip->lanes_map & BIT(i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) dev_dbg(dev, "idling lane %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) phy_power_off(rockchip->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /* disable ltssm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (rockchip->dma_trx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_DISABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) PCIE_CLIENT_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) PCIE_CORE_CONFIG_VENDOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) rockchip_pcie_write(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) PCIE_RC_CONFIG_RID_CCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) /* Clear THP cap's next cap pointer to remove L1 substate cap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /* Clear L0s from RC's link cap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) status |= PCIE_RC_CONFIG_DCSR_MPS_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) err_power_off_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) phy_power_off(rockchip->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) i = MAX_LANE_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) phy_exit(rockchip->phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) rockchip_pcie_handle_dma_interrupt(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) u32 dma_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct dma_trx_obj *obj = rockchip->dma_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) dma_status = rockchip_pcie_read(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) /* Core: clear dma interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) rockchip_pcie_write(rockchip, dma_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) WARN_ONCE(!(dma_status & 0x3), "dma_status 0x%x\n", dma_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (dma_status & (1 << 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) obj->irq_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) obj->dma_free = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (list_empty(&obj->tbl_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (obj->dma_free &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) obj->loop_count >= obj->loop_count_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) complete(&obj->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct rockchip_pcie *rockchip = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) u32 sub_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) dev_dbg(dev, "reg = 0x%x, sub_reg = 0x%x\n", reg, sub_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (reg & PCIE_CLIENT_INT_LOCAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) dev_dbg(dev, "local interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (sub_reg & PCIE_CORE_INT_PRFPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (sub_reg & PCIE_CORE_INT_CRFPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) if (sub_reg & PCIE_CORE_INT_RRPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (sub_reg & PCIE_CORE_INT_PRFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (sub_reg & PCIE_CORE_INT_CRFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (sub_reg & PCIE_CORE_INT_RT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) dev_dbg(dev, "replay timer timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (sub_reg & PCIE_CORE_INT_RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (sub_reg & PCIE_CORE_INT_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) dev_dbg(dev, "phy error detected on receive side\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) if (sub_reg & PCIE_CORE_INT_MTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) dev_dbg(dev, "malformed TLP received from the link\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) if (sub_reg & PCIE_CORE_INT_UCR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) dev_dbg(dev, "malformed TLP received from the link\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) if (sub_reg & PCIE_CORE_INT_FCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (sub_reg & PCIE_CORE_INT_CT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) dev_dbg(dev, "a request timed out waiting for completion\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (sub_reg & PCIE_CORE_INT_UTC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) dev_dbg(dev, "unmapped TC error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (sub_reg & PCIE_CORE_INT_MMVC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) dev_dbg(dev, "MSI mask register changes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) } else if (reg & PCIE_CLIENT_INT_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) dev_dbg(dev, "phy link changes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) rockchip_pcie_update_txcredit_mui(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) rockchip_pcie_clr_bw_int(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) if (reg & PCIE_CLIENT_INT_UDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) rockchip_pcie_write(rockchip, sub_reg, PCIE_CLIENT_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) rockchip_pcie_write(rockchip, reg, PCIE_CLIENT_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) rockchip_pcie_handle_dma_interrupt(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) PCIE_CLIENT_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct rockchip_pcie *rockchip = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) dev_dbg(dev, "legacy done interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (reg & PCIE_CLIENT_INT_MSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) dev_dbg(dev, "message done interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (reg & PCIE_CLIENT_INT_HOT_RST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) dev_dbg(dev, "hot reset interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (reg & PCIE_CLIENT_INT_DPA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) dev_dbg(dev, "dpa interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (reg & PCIE_CLIENT_INT_FATAL_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) dev_dbg(dev, "fatal error interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) dev_dbg(dev, "no fatal error interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (reg & PCIE_CLIENT_INT_CORR_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) dev_dbg(dev, "correctable error interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (reg & PCIE_CLIENT_INT_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) dev_dbg(dev, "phy interrupt received\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) PCIE_CLIENT_INT_NFATAL_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) PCIE_CLIENT_INT_CORR_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) PCIE_CLIENT_INT_PHY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) PCIE_CLIENT_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) struct irq_chip *chip = irq_desc_get_chip(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) u32 hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) u32 virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) chained_irq_enter(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) while (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) hwirq = ffs(reg) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) reg &= ~BIT(hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) virq = irq_find_mapping(rockchip->irq_domain, hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (virq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) generic_handle_irq(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) chained_irq_exit(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) int irq, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) irq = platform_get_irq_byname(pdev, "sys");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) IRQF_SHARED, "pcie-sys", rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dev_err(dev, "failed to request PCIe subsystem IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) irq = platform_get_irq_byname(pdev, "legacy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) irq_set_chained_handler_and_data(irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) rockchip_pcie_legacy_int_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) irq = platform_get_irq_byname(pdev, "client");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) IRQF_SHARED, "pcie-client", rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) dev_err(dev, "failed to request PCIe client IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * rockchip_pcie_parse_host_dt - Parse Device Tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) * @rockchip: PCIe port information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) * Return: '0' on success and error value on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) err = rockchip_pcie_parse_dt(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (IS_ERR(rockchip->vpcie12v)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) return PTR_ERR(rockchip->vpcie12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) dev_info(dev, "no vpcie12v regulator found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (IS_ERR(rockchip->vpcie3v3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return PTR_ERR(rockchip->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) dev_info(dev, "no vpcie3v3 regulator found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (IS_ERR(rockchip->vpcie1v8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return PTR_ERR(rockchip->vpcie1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (IS_ERR(rockchip->vpcie0v9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return PTR_ERR(rockchip->vpcie0v9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (!IS_ERR(rockchip->vpcie12v)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) err = regulator_enable(rockchip->vpcie12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) dev_err(dev, "fail to enable vpcie12v regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (!IS_ERR(rockchip->vpcie3v3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) err = regulator_enable(rockchip->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) dev_err(dev, "fail to enable vpcie3v3 regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) goto err_disable_12v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) err = regulator_enable(rockchip->vpcie1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) dev_err(dev, "fail to enable vpcie1v8 regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) goto err_disable_3v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) err = regulator_enable(rockchip->vpcie0v9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) dev_err(dev, "fail to enable vpcie0v9 regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) goto err_disable_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) err_disable_1v8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) regulator_disable(rockchip->vpcie1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) err_disable_3v3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (!IS_ERR(rockchip->vpcie3v3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) regulator_disable(rockchip->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) err_disable_12v:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (!IS_ERR(rockchip->vpcie12v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) regulator_disable(rockchip->vpcie12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) PCIE_CORE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) rockchip_pcie_enable_bw_int(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) rockchip_pcie_write(rockchip, PCIE_UDMA_INT_ENABLE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_ENABLE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) irq_hw_number_t hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) irq_set_chip_data(irq, domain->host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static const struct irq_domain_ops intx_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) .map = rockchip_pcie_intx_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) struct device_node *intc = of_get_next_child(dev->of_node, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (!intc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) dev_err(dev, "missing child interrupt-controller node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) &intx_domain_ops, rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) of_node_put(intc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (!rockchip->irq_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) dev_err(dev, "failed to get a INTx IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int region_no, int type, u8 num_pass_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) u32 lower_addr, u32 upper_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u32 ob_addr_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 ob_addr_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) u32 ob_desc_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) u32 aw_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (num_pass_bits + 1 < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (num_pass_bits > 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (region_no == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (region_no != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) aw_offset = (region_no << OB_REG_SIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ob_addr_1 = upper_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ob_desc_0 = (1 << 23 | type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) rockchip_pcie_write(rockchip, ob_addr_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) rockchip_pcie_write(rockchip, ob_addr_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) rockchip_pcie_write(rockchip, ob_desc_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) PCIE_CORE_OB_REGION_DESC0 + aw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) rockchip_pcie_write(rockchip, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) PCIE_CORE_OB_REGION_DESC1 + aw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int region_no, u8 num_pass_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u32 lower_addr, u32 upper_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) u32 ib_addr_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) u32 ib_addr_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) u32 aw_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (num_pass_bits > 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ib_addr_1 = upper_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct device *dev = rockchip->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct resource_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) u64 pci_addr, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int reg_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) rockchip_pcie_cfg_configuration_accesses(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) AXI_WRAPPER_TYPE0_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) size = resource_size(entry->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) pci_addr = entry->res->start - entry->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) rockchip->msg_bus_addr = pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) AXI_WRAPPER_MEM_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 20 - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) pci_addr + (reg_no << 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dev_err(dev, "program RC mem outbound ATU failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /* Workaround for PCIe DMA transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (rockchip->dma_trx_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) rockchip_pcie_prog_ob_atu(rockchip, 1, AXI_WRAPPER_MEM_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 32 - 1, rockchip->mem_reserve_start, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) dev_err(dev, "program RC mem inbound ATU failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* store the register number offset to program RC io outbound ATU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) offset = size >> 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) size = resource_size(entry->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pci_addr = entry->res->start - entry->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) err = rockchip_pcie_prog_ob_atu(rockchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) reg_no + 1 + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) AXI_WRAPPER_IO_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 20 - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) pci_addr + (reg_no << 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) dev_err(dev, "program RC io outbound ATU failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* assign message regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) AXI_WRAPPER_NOR_MSG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 20 - 1, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) rockchip->msg_bus_addr += ((reg_no + offset) << 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (!rockchip->msg_region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* Don't enter L2 state when no ep connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (rockchip->dma_trx_enabled == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* send PME_TURN_OFF message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* read LTSSM and wait for falling into L2 link state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) value, PCIE_LINK_IS_L2(value), 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) jiffies_to_usecs(5 * HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) static int rockchip_pcie_suspend_for_user(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* disable core and cli int since we don't need to ack PME_ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ret = rockchip_pcie_wait_l2(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) rockchip_pcie_enable_interrupts(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /* disable ltssm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_DISABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) PCIE_CLIENT_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) rockchip_pcie_deinit_phys(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static int rockchip_pcie_resume_for_user(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) err = rockchip_pcie_host_init_port(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) err = rockchip_pcie_cfg_atu(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* Need this to enter L1 again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) rockchip_pcie_update_txcredit_mui(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) rockchip_pcie_enable_interrupts(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!rockchip->dma_trx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ret = rockchip_pcie_suspend_for_user(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rockchip_pcie_disable_clocks(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) regulator_disable(rockchip->vpcie0v9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) err = regulator_enable(rockchip->vpcie0v9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) dev_err(dev, "fail to enable vpcie0v9 regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) err = rockchip_pcie_enable_clocks(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) goto err_disable_0v9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!rockchip->dma_trx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) err = rockchip_pcie_resume_for_user(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto err_disable_clocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) err_disable_clocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) rockchip_pcie_disable_clocks(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) err_disable_0v9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) regulator_disable(rockchip->vpcie0v9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int rockchip_pcie_really_probe(struct rockchip_pcie *rockchip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) err = rockchip_pcie_host_init_port(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) err = rockchip_pcie_setup_irq(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) rockchip_pcie_enable_interrupts(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) err = rockchip_pcie_cfg_atu(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) rockchip->bridge->sysdata = rockchip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) rockchip->bridge->ops = &rockchip_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return pci_host_probe(rockchip->bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static ssize_t pcie_deferred_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) const char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) err = kstrtou32(buf, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) rockchip->wait_ep = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) err = rockchip_pcie_really_probe(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static ssize_t pcie_reset_ep_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) const char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct dma_trx_obj *obj = rockchip->dma_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) dev_info(dev, "loop_cout = %d\n", obj->loop_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) err = kstrtou32(buf, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (val == PCIE_USER_UNLINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) rockchip_pcie_suspend_for_user(rockchip->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) else if (val == PCIE_USER_RELINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) rockchip_pcie_resume_for_user(rockchip->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static DEVICE_ATTR_WO(pcie_deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static DEVICE_ATTR_WO(pcie_reset_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static struct attribute *pcie_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) &dev_attr_pcie_deferred.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) &dev_attr_pcie_reset_ep.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static const struct attribute_group pcie_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) .attrs = pcie_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int rockchip_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct rockchip_pcie *rockchip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct pci_host_bridge *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (!dev->of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) rockchip = pci_host_bridge_priv(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) rockchip->bridge = bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) platform_set_drvdata(pdev, rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) rockchip->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) rockchip->is_rc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) err = rockchip_pcie_parse_host_dt(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) err = rockchip_pcie_enable_clocks(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) err = rockchip_pcie_set_vpcie(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) dev_err(dev, "failed to set vpcie regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) goto err_set_vpcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) err = rockchip_pcie_init_irq_domain(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) goto err_vpcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (rockchip->deferred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) err = sysfs_create_group(&pdev->dev.kobj, &pcie_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) dev_err(&pdev->dev, "SysFS group creation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) goto err_remove_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) err = rockchip_pcie_really_probe(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) dev_err(&pdev->dev, "deferred probe failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) goto err_deinit_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (rockchip->dma_trx_enabled == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) rockchip->dma_obj = rk_pcie_dma_obj_probe(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (IS_ERR(rockchip->dma_obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) dev_err(dev, "failed to prepare dma object\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) goto err_deinit_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (rockchip->dma_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) rockchip->dma_obj->start_dma_func = rk_pcie_start_dma_rk3399;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) rockchip->dma_obj->config_dma_func = rk_pcie_config_dma_rk3399;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) err_deinit_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) rockchip_pcie_deinit_phys(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (rockchip->deferred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) sysfs_remove_group(&pdev->dev.kobj, &pcie_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) err_remove_irq_domain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) irq_domain_remove(rockchip->irq_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) err_vpcie:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (!IS_ERR(rockchip->vpcie12v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) regulator_disable(rockchip->vpcie12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (!IS_ERR(rockchip->vpcie3v3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) regulator_disable(rockchip->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) regulator_disable(rockchip->vpcie1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) regulator_disable(rockchip->vpcie0v9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) err_set_vpcie:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) rockchip_pcie_disable_clocks(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int rockchip_pcie_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) u32 status1, status2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) status1 = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) status2 = rockchip_pcie_read(rockchip, PCIE_CLIENT_DEBUG_OUT_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!PCIE_LINK_UP(status1) || !PCIE_LINK_IS_L0(status2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) rockchip->in_remove = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) pci_stop_root_bus(bridge->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pci_remove_root_bus(bridge->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) irq_domain_remove(rockchip->irq_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* disable link state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) status |= BIT(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) status &= ~BIT(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) rockchip_pcie_deinit_phys(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) rockchip_pcie_disable_clocks(rockchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (rockchip->dma_trx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) rk_pcie_dma_obj_remove(rockchip->dma_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (rockchip->deferred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) sysfs_remove_group(&pdev->dev.kobj, &pcie_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (!IS_ERR(rockchip->vpcie12v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) regulator_disable(rockchip->vpcie12v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (!IS_ERR(rockchip->vpcie3v3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) regulator_disable(rockchip->vpcie3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) regulator_disable(rockchip->vpcie1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) regulator_disable(rockchip->vpcie0v9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static const struct dev_pm_ops rockchip_pcie_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) rockchip_pcie_resume_noirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static const struct of_device_id rockchip_pcie_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) { .compatible = "rockchip,rk3399-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static struct platform_driver rockchip_pcie_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) .name = "rockchip-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) .of_match_table = rockchip_pcie_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) .pm = &rockchip_pcie_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .probe = rockchip_pcie_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .remove = rockchip_pcie_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) module_platform_driver(rockchip_pcie_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) MODULE_AUTHOR("Rockchip Inc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) MODULE_LICENSE("GPL v2");