^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * PCIe host controller driver for Texas Instruments Keystone SoCs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2013-2014 Texas Instruments., Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * https://www.ti.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Author: Murali Karicheri <m-karicheri2@ti.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Implementation based on pci-exynos.c and pcie-designware.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/gpio/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/irqchip/chained_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/mfd/syscon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/msi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/of_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/resource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include "../../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include "pcie-designware.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define PCIE_VENDORID_MASK 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define PCIE_DEVICEID_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* Application registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define CMD_STATUS 0x004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define LTSSM_EN_VAL BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define OB_XLAT_EN_VAL BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define DBI_CS2 BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define CFG_SETUP 0x008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define CFG_BUS(x) (((x) & 0xff) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define CFG_DEVICE(x) (((x) & 0x1f) << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define CFG_FUNC(x) ((x) & 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define CFG_TYPE1 BIT(24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define OB_SIZE 0x030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define OB_ENABLEN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define OB_WIN_SIZE 8 /* 8MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define PCIE_EP_IRQ_SET 0x64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define PCIE_EP_IRQ_CLR 0x68
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define INT_ENABLE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /* IRQ register defines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define IRQ_EOI 0x050
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define MSI_IRQ 0x054
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define MSI_IRQ_OFFSET 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define IRQ_STATUS(n) (0x184 + ((n) << 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define INTx_EN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define ERR_IRQ_STATUS 0x1c4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define ERR_IRQ_ENABLE_SET 0x1c8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define ERR_AER BIT(5) /* ECRC error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define ERR_CORR BIT(3) /* Correctable error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define ERR_NONFATAL BIT(2) /* Non-fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define ERR_FATAL BIT(1) /* Fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define ERR_SYS BIT(0) /* System error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) ERR_NONFATAL | ERR_FATAL | ERR_SYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) /* PCIE controller device IDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define PCIE_RC_K2HK 0xb008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define PCIE_RC_K2E 0xb009
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define PCIE_RC_K2L 0xb00a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define PCIE_RC_K2G 0xb00b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define EP 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define LEG_EP 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define RC 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define KS_PCIE_SYSCLOCKOUTEN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define AM654_PCIE_DEV_TYPE_MASK 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define AM654_WIN_SIZE SZ_64K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define APP_ADDR_SPACE_0 (16 * SZ_1K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
/* Per-compatible match data selecting controller mode and DWC callbacks. */
struct ks_pcie_of_data {
	enum dw_pcie_device_mode mode;			/* RC, legacy EP or EP */
	const struct dw_pcie_host_ops *host_ops;	/* used in host (RC) mode */
	const struct dw_pcie_ep_ops *ep_ops;		/* used in endpoint mode */
	unsigned int version;				/* DesignWare IP version — presumably fed to the DWC core; verify at probe */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
/* Driver-private state for one Keystone/AM654 PCIe controller instance. */
struct keystone_pcie {
	struct dw_pcie *pci;			/* underlying DesignWare controller */
	/* PCI Device ID */
	u32 device_id;
	int legacy_host_irqs[PCI_NUM_INTX];	/* host-side IRQs for INTA..INTD */
	struct device_node *legacy_intc_np;	/* DT node of the legacy intc */

	int msi_host_irq;			/* host-side MSI interrupt line */
	int num_lanes;				/* number of PCIe lanes / PHYs */
	struct phy **phy;			/* one PHY per lane */
	struct device_link **link;		/* device links to the PHYs */
	struct device_node *msi_intc_np;	/* DT node of the MSI intc */
	struct irq_domain *legacy_irq_domain;	/* linear domain for INTx */
	struct device_node *np;			/* controller's own DT node */

	/* Application register space */
	void __iomem *va_app_base;	/* DT 1st resource */
	struct resource app;		/* physical extent of the app registers */
	bool is_am6;			/* AM6 SoC quirks (no app OB windows, AM6_ERR_AER bit) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return readl(ks_pcie->va_app_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) writel(val, ks_pcie->va_app_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
/*
 * Ack an MSI vector: clear its pending status bit and issue an EOI for the
 * host-side MSI interrupt line that carries this vector.
 */
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	u32 reg_offset;
	u32 bit_pos;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/* MSI vector N is tracked in status register (N % 8), bit (N / 8) */
	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	/* Clear the vector's pending bit in its MSI status register */
	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
			   BIT(bit_pos));
	/* EOI the host interrupt for this MSI register bank */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
/*
 * Compose the MSI message for @data: the doorbell address is the MSI_IRQ
 * register inside this controller's application register space, and the
 * payload is the hardware IRQ number of the vector.
 */
static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;
	u64 msi_target;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/* Physical address of the MSI_IRQ app register acts as the doorbell */
	msi_target = ks_pcie->app.start + MSI_IRQ;
	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);
	msg->data = data->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
/* MSI affinity cannot be steered on this controller; always refuse. */
static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
/*
 * Mask an MSI vector by setting its bit in the matching MSI_IRQ_ENABLE_CLR
 * register. Runs under pp->lock to serialize against other MSI register
 * accesses.
 */
static void ks_pcie_msi_mask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/* MSI vector N is controlled in register (N % 8), bit (N / 8) */
	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
/*
 * Unmask an MSI vector by setting its bit in the matching MSI_IRQ_ENABLE_SET
 * register. Runs under pp->lock to serialize against other MSI register
 * accesses.
 */
static void ks_pcie_msi_unmask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	/* MSI vector N is controlled in register (N % 8), bit (N / 8) */
	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
/* irq_chip handed to the DWC MSI domain (see ks_pcie_msi_host_init()). */
static struct irq_chip ks_pcie_msi_irq_chip = {
	.name = "KEYSTONE-PCI-MSI",
	.irq_ack = ks_pcie_msi_irq_ack,
	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
	.irq_set_affinity = ks_pcie_msi_set_affinity,
	.irq_mask = ks_pcie_msi_mask,
	.irq_unmask = ks_pcie_msi_unmask,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static int ks_pcie_msi_host_init(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return dw_pcie_allocate_domains(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
/*
 * Dispatch one legacy INTx interrupt. @offset selects the INTx line (0-3);
 * if its status register reports a pending event, the corresponding virq in
 * the legacy IRQ domain is handled, and the line is EOI'd either way.
 */
static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
				      int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));

	/* Only bit 0 of IRQ_STATUS(n) carries the INTx pending flag */
	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
/*
 * Intentionally empty MSI init for AM654: reporting success without creating
 * domains keeps the DesignWare core from configuring its own MSI handling.
 */
static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
{
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
/* Enable all platform error interrupts (fatal, non-fatal, AER, AXI, ...). */
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
/*
 * Log and clear pending platform error interrupts.
 *
 * On AM6 the ECRC error lives in a different bit (AM6_ERR_AER) that aliases
 * ERR_AXI on other SoCs, hence the is_am6 special-casing below.
 *
 * Return: IRQ_HANDLED if any error bit was pending, IRQ_NONE otherwise.
 */
static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 reg;
	struct device *dev = ks_pcie->pci->dev;

	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
	if (!reg)
		return IRQ_NONE;

	if (reg & ERR_SYS)
		dev_err(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_err(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_CORR)
		dev_dbg(dev, "Correctable Error\n");

	/* ERR_AXI shares its bit with AM6_ERR_AER, so skip it on AM6 */
	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
		dev_err(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
		dev_err(dev, "ECRC Error\n");

	/* Write the observed bits back to clear them */
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
/* No per-INTx ack needed; the chained handler EOIs the line itself. */
static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
/* Intentionally empty: per-INTx masking is not done here (TODO: confirm
 * whether PCIE_LEGACY_IRQ_ENABLE_CLR could be used). */
static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
/* Intentionally empty: per-INTx unmasking is not done here (TODO: confirm
 * whether PCIE_LEGACY_IRQ_ENABLE_SET could be used). */
static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
/* irq_chip for the legacy INTx domain; all callbacks are no-ops. */
static struct irq_chip ks_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_pcie_ack_legacy_irq,
	.irq_mask = ks_pcie_mask_legacy_irq,
	.irq_unmask = ks_pcie_unmask_legacy_irq,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) irq_hw_number_t hw_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) handle_level_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) irq_set_chip_data(irq, d->host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
/* Ops for the linear legacy INTx IRQ domain. */
static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
	.map = ks_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	/* Poll until the DBI_CS2 bit reads back as set */
	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	/* Poll until the DBI_CS2 bit reads back as cleared */
	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
/*
 * Program the RC application registers: disable the RC's own BARs for
 * inbound access, then (except on AM6, which handles outbound translation
 * elsewhere) carve the bridge's first MEM window into OB_WIN_SIZE-MB
 * 1:1-mapped outbound windows and enable outbound translation.
 */
static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	u32 val;
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 num_viewport = pci->num_viewport;
	u64 start, end;
	struct resource *mem;
	int i;

	/* First non-prefetchable MEM window of the host bridge */
	mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
	start = mem->start;
	end = mem->end;

	/* Disable BARs for inbound access */
	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_pcie_clear_dbi_mode(ks_pcie);

	/* AM6 does not use the app-register outbound windows */
	if (ks_pcie->is_am6)
		return;

	/* OB_SIZE takes log2 of the per-window size in MB */
	val = ilog2(OB_WIN_SIZE);
	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; i < num_viewport && (start < end); i++) {
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
				   lower_32_bits(start) | OB_ENABLEN);
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
				   upper_32_bits(start));
		start += OB_WIN_SIZE * SZ_1M;
	}

	/* Turn on outbound address translation */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= OB_XLAT_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
/*
 * Map a config access to a downstream device: program CFG_SETUP with the
 * target bus/device/function (type 1 for buses below the root's immediate
 * child), then return the shared config window offset for @where.
 */
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	/* Type 1 accesses for anything not directly on the root bus */
	if (!pci_is_root_bus(bus->parent))
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return pp->va_cfg0_base + where;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
/* Config-space ops for child (non-root) buses: CFG_SETUP-based mapping. */
static struct pci_ops ks_child_pcie_ops = {
	.map_bus = ks_pcie_other_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * This sets BAR0 to enable inbound access for MSI_IRQ register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct pcie_port *pp = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (!pci_is_root_bus(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) /* Configure and set up BAR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) ks_pcie_set_dbi_mode(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) /* Enable BAR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) ks_pcie_clear_dbi_mode(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * For BAR0, just setting bus address for inbound writes (MSI) should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * be sufficient. Use physical address to avoid any conflicts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) static struct pci_ops ks_pcie_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) .map_bus = dw_pcie_own_conf_map_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) .read = pci_generic_config_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) .write = pci_generic_config_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) .add_bus = ks_pcie_v3_65_add_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * ks_pcie_link_up() - Check if link up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static int ks_pcie_link_up(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) val &= PORT_LOGIC_LTSSM_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) return (val == PORT_LOGIC_LTSSM_STATE_L0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static void ks_pcie_stop_link(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) /* Disable Link training */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) val &= ~LTSSM_EN_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static int ks_pcie_start_link(struct dw_pcie *pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (dw_pcie_link_up(pci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) dev_dbg(dev, "link is already up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) /* Initiate Link Training */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) static void ks_pcie_quirk(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) struct pci_bus *bus = dev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) struct pci_dev *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) static const struct pci_device_id rc_pci_devids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) { 0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (pci_is_root_bus(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) bridge = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) /* look for the host bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) while (!pci_is_root_bus(bus)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) bridge = bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) bus = bus->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) * Keystone PCI controller has a h/w limitation of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * 256 bytes maximum read request size. It can't handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * anything higher than this. So force this limit on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * all downstream devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) if (pci_match_id(rc_pci_devids, bridge)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (pcie_get_readrq(dev) > 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) dev_info(&dev->dev, "limiting MRRS to 256\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) pcie_set_readrq(dev, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) unsigned int irq = desc->irq_data.hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) u32 offset = irq - ks_pcie->msi_host_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) struct dw_pcie *pci = ks_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) struct pcie_port *pp = &pci->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) struct irq_chip *chip = irq_desc_get_chip(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) u32 vector, virq, reg, pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) dev_dbg(dev, "%s, irq %d\n", __func__, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * The chained irq handler installation would have replaced normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * interrupt driver handler so we need to take care of mask/unmask and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * ack operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) chained_irq_enter(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * shows 1, 9, 17, 25 and so forth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) for (pos = 0; pos < 4; pos++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (!(reg & BIT(pos)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) vector = offset + (pos << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) virq = irq_linear_revmap(pp->irq_domain, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) generic_handle_irq(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) chained_irq_exit(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * @irq: IRQ line for legacy interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * @desc: Pointer to irq descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) * Traverse through pending legacy interrupts and invoke handler for each. Also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) * takes care of interrupt controller level mask/ack operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) unsigned int irq = irq_desc_get_irq(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct dw_pcie *pci = ks_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) struct irq_chip *chip = irq_desc_get_chip(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) dev_dbg(dev, ": Handling legacy irq %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * The chained irq handler installation would have replaced normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * interrupt driver handler so we need to take care of mask/unmask and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * ack operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) chained_irq_enter(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) chained_irq_exit(chip, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) struct device *dev = ks_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct device_node *np = ks_pcie->np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) struct device_node *intc_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct irq_data *irq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) int irq_count, irq, ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (!IS_ENABLED(CONFIG_PCI_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if (!intc_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (ks_pcie->is_am6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) dev_warn(dev, "msi-interrupt-controller node is absent\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) irq_count = of_irq_count(intc_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (!irq_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) for (i = 0; i < irq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) irq = irq_of_parse_and_map(intc_np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (!ks_pcie->msi_host_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) irq_data = irq_get_irq_data(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (!irq_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ks_pcie->msi_host_irq = irq_data->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) of_node_put(intc_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) of_node_put(intc_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) struct device *dev = ks_pcie->pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct irq_domain *legacy_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) struct device_node *np = ks_pcie->np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) struct device_node *intc_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) int irq_count, irq, ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (!intc_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * Since legacy interrupts are modeled as edge-interrupts in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * AM6, keep it disabled for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (ks_pcie->is_am6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) dev_warn(dev, "legacy-interrupt-controller node is absent\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) irq_count = of_irq_count(intc_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (!irq_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) for (i = 0; i < irq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) irq = irq_of_parse_and_map(intc_np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) ks_pcie->legacy_host_irqs[i] = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) irq_set_chained_handler_and_data(irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ks_pcie_legacy_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) legacy_irq_domain =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) irq_domain_add_linear(intc_np, PCI_NUM_INTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) &ks_pcie_legacy_irq_domain_ops, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (!legacy_irq_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dev_err(dev, "Failed to add irq domain for legacy irqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ks_pcie->legacy_irq_domain = legacy_irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) for (i = 0; i < PCI_NUM_INTX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) of_node_put(intc_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) #ifdef CONFIG_ARM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * When a PCI device does not exist during config cycles, keystone host gets a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * bus error instead of returning 0xffffffff. This handler always returns 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * for this kind of faults.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) unsigned long instr = *(unsigned long *) instruction_pointer(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if ((instr & 0x0e100090) == 0x00100090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) int reg = (instr >> 12) & 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) regs->uregs[reg] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) regs->ARM_pc += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) unsigned int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct regmap *devctrl_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct dw_pcie *pci = ks_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (IS_ERR(devctrl_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return PTR_ERR(devctrl_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ret = regmap_read(devctrl_regs, 0, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) dw_pcie_dbi_ro_wr_en(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) dw_pcie_dbi_ro_wr_dis(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static int __init ks_pcie_host_init(struct pcie_port *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pp->bridge->ops = &ks_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!ks_pcie->is_am6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) pp->bridge->child_ops = &ks_child_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ret = ks_pcie_config_legacy_irq(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ret = ks_pcie_config_msi_irq(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) dw_pcie_setup_rc(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) ks_pcie_stop_link(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ks_pcie_setup_rc_app_regs(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) pci->dbi_base + PCI_IO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ret = ks_pcie_init_id(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) #ifdef CONFIG_ARM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * PCIe access errors that result into OCP errors are caught by ARM as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * "External aborts"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) "Asynchronous external abort");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ks_pcie_start_link(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dw_pcie_wait_for_link(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static const struct dw_pcie_host_ops ks_pcie_host_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) .host_init = ks_pcie_host_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) .msi_host_init = ks_pcie_msi_host_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) .host_init = ks_pcie_host_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) .msi_host_init = ks_pcie_am654_msi_host_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct keystone_pcie *ks_pcie = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return ks_pcie_handle_error_irq(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct dw_pcie *pci = ks_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct pcie_port *pp = &pci->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ret = dw_pcie_host_init(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) dev_err(dev, "failed to initialize host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) u32 reg, size_t size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ks_pcie_set_dbi_mode(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dw_pcie_write(base + reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ks_pcie_clear_dbi_mode(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) .start_link = ks_pcie_start_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) .stop_link = ks_pcie_stop_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) .link_up = ks_pcie_link_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) .write_dbi2 = ks_pcie_am654_write_dbi2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ep->page_size = AM654_WIN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct dw_pcie *pci = ks_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) u8 int_pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (int_pin == 0 || int_pin > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) enum pci_epc_irq_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u16 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) case PCI_EPC_IRQ_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ks_pcie_am654_raise_legacy_irq(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) case PCI_EPC_IRQ_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) case PCI_EPC_IRQ_MSIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dev_err(pci->dev, "UNKNOWN IRQ type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static const struct pci_epc_features ks_pcie_am654_epc_features = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) .linkup_notifier = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) .msi_capable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) .msix_capable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) .bar_fixed_64bit = 1 << BAR_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) .bar_fixed_size[2] = SZ_1M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) .bar_fixed_size[3] = SZ_64K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) .bar_fixed_size[4] = 256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) .bar_fixed_size[5] = SZ_1M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) .align = SZ_1M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static const struct pci_epc_features*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return &ks_pcie_am654_epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) .ep_init = ks_pcie_am654_ep_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .raise_irq = ks_pcie_am654_raise_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) .get_features = &ks_pcie_am654_get_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct dw_pcie_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct dw_pcie *pci = ks_pcie->pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ep = &pci->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ep->phys_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ep->addr_size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ret = dw_pcie_ep_init(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dev_err(dev, "failed to initialize endpoint\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) int num_lanes = ks_pcie->num_lanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) while (num_lanes--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) phy_power_off(ks_pcie->phy[num_lanes]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) phy_exit(ks_pcie->phy[num_lanes]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int num_lanes = ks_pcie->num_lanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) for (i = 0; i < num_lanes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = phy_reset(ks_pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ret = phy_init(ks_pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ret = phy_power_on(ks_pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) phy_exit(ks_pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) err_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) phy_power_off(ks_pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) phy_exit(ks_pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static int ks_pcie_set_mode(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct regmap *syscon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (IS_ERR(syscon))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) ret = regmap_update_bits(syscon, 0, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) dev_err(dev, "failed to set pcie mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static int ks_pcie_am654_set_mode(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) enum dw_pcie_device_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct regmap *syscon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (IS_ERR(syscon))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) mask = AM654_PCIE_DEV_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) case DW_PCIE_RC_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) val = RC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) case DW_PCIE_EP_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) val = EP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dev_err(dev, "INVALID device type %d\n", mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ret = regmap_update_bits(syscon, 0, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) dev_err(dev, "failed to set pcie mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .host_ops = &ks_pcie_host_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .version = 0x365A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .host_ops = &ks_pcie_am654_host_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .mode = DW_PCIE_RC_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .version = 0x490A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) .ep_ops = &ks_pcie_am654_ep_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .mode = DW_PCIE_EP_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .version = 0x490A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static const struct of_device_id ks_pcie_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .type = "pci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) .data = &ks_pcie_rc_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) .compatible = "ti,keystone-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .data = &ks_pcie_am654_rc_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .compatible = "ti,am654-pcie-rc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) .data = &ks_pcie_am654_ep_of_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) .compatible = "ti,am654-pcie-ep",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int __init ks_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) const struct dw_pcie_host_ops *host_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) const struct dw_pcie_ep_ops *ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) const struct ks_pcie_of_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) enum dw_pcie_device_mode mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct dw_pcie *pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct keystone_pcie *ks_pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct device_link **link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct gpio_desc *gpiod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) unsigned int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct phy **phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) u32 num_lanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) char name[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) data = (struct ks_pcie_of_data *)match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) version = data->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) host_ops = data->host_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ep_ops = data->ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) mode = data->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!ks_pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (!pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (IS_ERR(ks_pcie->va_app_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return PTR_ERR(ks_pcie->va_app_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ks_pcie->app = *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) base = devm_pci_remap_cfg_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) ks_pcie->is_am6 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) pci->dbi_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) pci->dbi_base2 = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) pci->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) pci->ops = &ks_pcie_dw_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) pci->version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) "ks-pcie-error-irq", ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) dev_err(dev, "failed to request error IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ret = of_property_read_u32(np, "num-lanes", &num_lanes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) num_lanes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (!phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) for (i = 0; i < num_lanes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) snprintf(name, sizeof(name), "pcie-phy%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) phy[i] = devm_phy_optional_get(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (IS_ERR(phy[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) ret = PTR_ERR(phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) goto err_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (!phy[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!link[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) goto err_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ks_pcie->np = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) ks_pcie->pci = pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ks_pcie->link = link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) ks_pcie->num_lanes = num_lanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ks_pcie->phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) gpiod = devm_gpiod_get_optional(dev, "reset",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (IS_ERR(gpiod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) ret = PTR_ERR(gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) dev_err(dev, "Failed to get reset GPIO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) goto err_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) ret = ks_pcie_enable_phy(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) dev_err(dev, "failed to enable phy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) goto err_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) platform_set_drvdata(pdev, ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dev_err(dev, "pm_runtime_get_sync failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) goto err_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (pci->version >= 0x480A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ret = ks_pcie_am654_set_mode(dev, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ret = ks_pcie_set_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) goto err_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) case DW_PCIE_RC_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) goto err_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * "Power Sequencing and Reset Signal Timings" table in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * indicates PERST# should be deasserted after minimum of 100us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * once REFCLK is stable. The REFCLK to the connector in RC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * mode is selected while enabling the PHY. So deassert PERST#
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * after 100 us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (gpiod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) usleep_range(100, 200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) gpiod_set_value_cansleep(gpiod, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) pci->pp.ops = host_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) goto err_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) case DW_PCIE_EP_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) goto err_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) pci->ep.ops = ep_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) goto err_get_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) dev_err(dev, "INVALID device type %d\n", mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ks_pcie_enable_error_irq(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) err_get_sync:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ks_pcie_disable_phy(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) err_link:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) while (--i >= 0 && link[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) device_link_del(link[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static int __exit ks_pcie_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct device_link **link = ks_pcie->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int num_lanes = ks_pcie->num_lanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) ks_pcie_disable_phy(ks_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) while (num_lanes--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) device_link_del(link[num_lanes]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static struct platform_driver ks_pcie_driver __refdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) .probe = ks_pcie_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) .remove = __exit_p(ks_pcie_remove),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) .name = "keystone-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) .of_match_table = of_match_ptr(ks_pcie_of_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) builtin_platform_driver(ks_pcie_driver);