Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
/**
 * APM X-Gene PCIe Driver
 *
 * Copyright (c) 2014 Applied Micro Circuits Corporation.
 *
 * Author: Tanmay Inamdar <tinamdar@apm.com>.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../pci.h"

#define PCIECORE_CTLANDSTATUS		0x50
#define PIM1_1L				0x80
#define IBAR2				0x98
#define IR2MSK				0x9c
#define PIM2_1L				0xa0
#define IBAR3L				0xb4
#define IR3MSKL				0xbc
#define PIM3_1L				0xc4
#define OMR1BARL			0x100
#define OMR2BARL			0x118
#define OMR3BARL			0x130
#define CFGBARL				0x154
#define CFGBARH				0x158
#define CFGCTL				0x15c
#define RTDID				0x160
#define BRIDGE_CFG_0			0x2000
#define BRIDGE_CFG_4			0x2010
#define BRIDGE_STATUS_0			0x2600

#define LINK_UP_MASK			0x00000100
#define AXI_EP_CFG_ACCESS		0x10000
#define EN_COHERENCY			0xF0000000
#define EN_REG				0x00000001
#define OB_LO_IO			0x00000002
#define XGENE_PCIE_VENDORID		0x10E8
#define XGENE_PCIE_DEVICEID		0xE004
#define SZ_1T				(SZ_1G*1024ULL)
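/*
 * Note (added for clarity): this macro extracts the PHY rate field,
 * bits [15:14] of its argument; e.g. PIPE_PHY_RATE_RD(0x8000) == 2.
 */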
#define PIPE_PHY_RATE_RD(src)		((0xc000 & (u32)(src)) >> 0xe)

#define XGENE_V1_PCI_EXP_CAP		0x40

/* PCIe IP version */
#define XGENE_PCIE_IP_VER_UNKN		0
#define XGENE_PCIE_IP_VER_1		1
#define XGENE_PCIE_IP_VER_2		2

#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
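/*
 * Per-port state (summary added for clarity): csr_base maps the
 * controller CSRs, cfg_base maps the configuration-space window, and
 * cfg_addr records the physical base of that window, which is later
 * programmed into CFGBARL/CFGBARH.
 */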
struct xgene_pcie_port {
	struct device_node	*node;
	struct device		*dev;
	struct clk		*clk;
	void __iomem		*csr_base;
	void __iomem		*cfg_base;
	unsigned long		cfg_addr;
	bool			link_up;
	u32			version;
};

static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
{
	return readl(port->csr_base + reg);
}

static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
{
	writel(val, port->csr_base + reg);
}

static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
	return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}

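/*
 * Note (added for clarity): with a devicetree boot, bus->sysdata points
 * directly at the xgene_pcie_port; with ACPI it is the ECAM
 * pci_config_window whose ->priv is set in xgene_pcie_ecam_init().
 */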
static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
{
	struct pci_config_window *cfg;

	if (acpi_disabled)
		return (struct xgene_pcie_port *)(bus->sysdata);

	cfg = bus->sysdata;
	return (struct xgene_pcie_port *)(cfg->priv);
}

/*
 * When address bits [17:16] are 2'b01, the configuration access is
 * treated as Type 1 and forwarded to the external PCIe device.
 */
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);

	if (bus->number >= (bus->primary + 1))
		return port->cfg_base + AXI_EP_CFG_ACCESS;

	return port->cfg_base;
}

/*
 * For a Configuration request, the RTDID register supplies the Bus,
 * Device and Function numbers of the request header fields.
 */
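/*
 * Worked example (added for illustration): a request for bus 1,
 * device 2, function 3 is encoded as (1 << 8) | (2 << 3) | 3 = 0x113;
 * root-bus accesses program RTDID with 0.
 */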
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
	unsigned int b, d, f;
	u32 rtdid_val = 0;

	b = bus->number;
	d = PCI_SLOT(devfn);
	f = PCI_FUNC(devfn);

	if (!pci_is_root_bus(bus))
		rtdid_val = (b << 8) | (d << 3) | f;

	xgene_pcie_writel(port, RTDID, rtdid_val);
	/* read the register back to ensure flush */
	xgene_pcie_readl(port, RTDID);
}

/*
 * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space
 * for the translation from the PCI bus to the native bus.  The entire
 * DDR region is mapped into PCIe space using these registers, so that
 * it can be reached by DMA from EP devices.  The bridge's BAR0/1 should
 * be hidden during enumeration to avoid sizing and resource allocation
 * by the PCIe core.
 */
static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
{
	if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
				     (offset == PCI_BASE_ADDRESS_1)))
		return true;

	return false;
}

static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
					int offset)
{
	if ((pci_is_root_bus(bus) && devfn != 0) ||
	    xgene_pcie_hide_rc_bars(bus, offset))
		return NULL;

	xgene_pcie_set_rtdid_reg(bus, devfn);
	return xgene_pcie_get_cfg_base(bus) + offset;
}

static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *val)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);

	if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
	    PCIBIOS_SUCCESSFUL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * The v1 controller has a bug in its Configuration Request
	 * Retry Status (CRS) logic: when CRS is enabled and we read the
	 * Vendor and Device ID of a non-existent device, the controller
	 * fabricates return data of 0xFFFF0001 ("device exists but is not
	 * ready") instead of 0xFFFFFFFF ("device does not exist").  This
	 * causes the PCI core to retry the read until it times out.
	 * Avoid this by not claiming to support CRS.
	 */
	if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
	    ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);

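	/*
	 * Extract the requested sub-word from the 32-bit read; e.g. a
	 * one-byte read at where == 0x0e returns (*val >> 16) & 0xff.
	 */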
	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
static int xgene_get_csr_resource(struct acpi_device *adev,
				  struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
{
	struct device *dev = cfg->parent;
	struct acpi_device *adev = to_acpi_device(dev);
	struct xgene_pcie_port *port;
	struct resource csr;
	int ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = xgene_get_csr_resource(adev, &csr);
	if (ret) {
		dev_err(dev, "can't get CSR resource\n");
		return ret;
	}
	port->csr_base = devm_pci_remap_cfg_resource(dev, &csr);
	if (IS_ERR(port->csr_base))
		return PTR_ERR(port->csr_base);

	port->cfg_base = cfg->win;
	port->version = ipversion;

	cfg->priv = port;
	return 0;
}

static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
{
	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
}

const struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
	.bus_shift	= 16,
	.init		= xgene_v1_pcie_ecam_init,
	.pci_ops	= {
		.map_bus	= xgene_pcie_map_bus,
		.read		= xgene_pcie_config_read32,
		.write		= pci_generic_config_write,
	}
};

static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
{
	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
}

const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
	.bus_shift	= 16,
	.init		= xgene_v2_pcie_ecam_init,
	.pci_ops	= {
		.map_bus	= xgene_pcie_map_bus,
		.read		= xgene_pcie_config_read32,
		.write		= pci_generic_config_write,
	}
};
#endif

#if defined(CONFIG_PCI_XGENE)
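/*
 * Note (added for clarity): the inbound mask is spread 16 bits at a
 * time across three consecutive CSR words; each read-modify-write below
 * preserves the neighbouring half-word of the register it touches.
 */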
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
				  u32 flags, u64 size)
{
	u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
	u32 val32 = 0;
	u32 val;

	val32 = xgene_pcie_readl(port, addr);
	val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
	xgene_pcie_writel(port, addr, val);

	val32 = xgene_pcie_readl(port, addr + 0x04);
	val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
	xgene_pcie_writel(port, addr + 0x04, val);

	val32 = xgene_pcie_readl(port, addr + 0x04);
	val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
	xgene_pcie_writel(port, addr + 0x04, val);

	val32 = xgene_pcie_readl(port, addr + 0x08);
	val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
	xgene_pcie_writel(port, addr + 0x08, val);

	return mask;
}

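/*
 * Note (added for clarity): link state is bit 8 (LINK_UP_MASK) of
 * PCIECORE_CTLANDSTATUS, the rate comes from bits [15:14] of the same
 * register (PIPE_PHY_RATE_RD), and the negotiated lane count from bits
 * [31:26] of BRIDGE_STATUS_0.
 */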
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
			      u32 *lanes, u32 *speed)
{
	u32 val32;

	port->link_up = false;
	val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
	if (val32 & LINK_UP_MASK) {
		port->link_up = true;
		*speed = PIPE_PHY_RATE_RD(val32);
		val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
		*lanes = val32 >> 26;
	}
}

static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
	struct device *dev = port->dev;
	int rc;

	port->clk = clk_get(dev, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "clock not available\n");
		return -ENODEV;
	}

	rc = clk_prepare_enable(port->clk);
	if (rc) {
		dev_err(dev, "clock enable failed\n");
		return rc;
	}

	return 0;
}

static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
			      struct platform_device *pdev)
{
	struct device *dev = port->dev;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
	port->csr_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(port->csr_base))
		return PTR_ERR(port->csr_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	port->cfg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(port->cfg_base))
		return PTR_ERR(port->cfg_base);
	port->cfg_addr = res->start;

	return 0;
}

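/*
 * Outbound window layout relative to 'offset', as programmed below:
 * +0x00/+0x04 CPU address lo/hi, +0x08/+0x0c mask lo/hi,
 * +0x10/+0x14 PCI address lo/hi.
 */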
static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
				    struct resource *res, u32 offset,
				    u64 cpu_addr, u64 pci_addr)
{
	struct device *dev = port->dev;
	resource_size_t size = resource_size(res);
	u64 restype = resource_type(res);
	u64 mask = 0;
	u32 min_size;
	u32 flag = EN_REG;

	if (restype == IORESOURCE_MEM) {
		min_size = SZ_128M;
	} else {
		min_size = 128;
		flag |= OB_LO_IO;
	}

	if (size >= min_size)
		mask = ~(size - 1) | flag;
	else
		dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
			 (u64)size, min_size);

	xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
	xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
	xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
	xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
	xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
	xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}

static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
{
	u64 addr = port->cfg_addr;

	xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
	xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
	xgene_pcie_writel(port, CFGCTL, EN_REG);
}

static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
	struct resource_entry *window;
	struct device *dev = port->dev;

	resource_list_for_each_entry(window, &bridge->windows) {
		struct resource *res = window->res;
		u64 restype = resource_type(res);

		dev_dbg(dev, "%pR\n", res);

		switch (restype) {
		case IORESOURCE_IO:
			xgene_pcie_setup_ob_reg(port, res, OMR3BARL,
						pci_pio_to_address(res->start),
						res->start - window->offset);
			break;
		case IORESOURCE_MEM:
			if (res->flags & IORESOURCE_PREFETCH)
				xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
							res->start,
							res->start -
							window->offset);
			else
				xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
							res->start,
							res->start -
							window->offset);
			break;
		case IORESOURCE_BUS:
			break;
		default:
			dev_err(dev, "invalid resource %pR\n", res);
			return -EINVAL;
		}
	}
	xgene_pcie_setup_cfg_reg(port);
	return 0;
}

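/*
 * Note (added for clarity): the PIM (PCIe inbound map) pair at
 * +0x00/+0x04 holds the target PCI address (the high word also carries
 * EN_COHERENCY), and +0x10/+0x14 hold the size value passed in.
 */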
static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
				  u64 pim, u64 size)
{
	xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
	xgene_pcie_writel(port, pim_reg + 0x04,
			  upper_32_bits(pim) | EN_COHERENCY);
	xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
	xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}

/*
 * X-Gene PCIe supports a maximum of 3 inbound memory regions.
 * This function selects a region based on its size.
 */
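/*
 * Example (added for illustration): a 1 MB dma-range claims region 1
 * (IBAR2), a following 64 MB range takes region 0 (the RC's BAR0/1), a
 * third large range takes region 2 (IBAR3L), and any further range
 * fails with -EINVAL.
 */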
static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
{
	if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
		*ib_reg_mask |= (1 << 1);
		return 1;
	}

	if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
		*ib_reg_mask |= (1 << 0);
		return 0;
	}

	if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
		*ib_reg_mask |= (1 << 2);
		return 2;
	}

	return -EINVAL;
}

static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
				    struct of_pci_range *range, u8 *ib_reg_mask)
{
	void __iomem *cfg_base = port->cfg_base;
	struct device *dev = port->dev;
	void *bar_addr;
	u32 pim_reg;
	u64 cpu_addr = range->cpu_addr;
	u64 pci_addr = range->pci_addr;
	u64 size = range->size;
	u64 mask = ~(size - 1) | EN_REG;
	u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
	u32 bar_low;
	int region;

	region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
	if (region < 0) {
		dev_warn(dev, "invalid pcie dma-range config\n");
		return;
	}

	if (range->flags & IORESOURCE_PREFETCH)
		flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;

	bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
	switch (region) {
	case 0:
		xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
		bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
		writel(bar_low, bar_addr);
		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
		pim_reg = PIM1_1L;
		break;
	case 1:
		xgene_pcie_writel(port, IBAR2, bar_low);
		xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
		pim_reg = PIM2_1L;
		break;
	case 2:
		xgene_pcie_writel(port, IBAR3L, bar_low);
		xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
		xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
		xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
		pim_reg = PIM3_1L;
		break;
	}

	xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}

static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
	struct device_node *np = port->node;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	struct device *dev = port->dev;
	u8 ib_reg_mask = 0;

	if (of_pci_dma_range_parser_init(&parser, np)) {
		dev_err(dev, "missing dma-ranges property\n");
		return -EINVAL;
	}

	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;

		dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);
		xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
	}
	return 0;
}

/* clear BAR configuration which was done by firmware */
static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
{
	int i;

	for (i = PIM1_1L; i <= CFGCTL; i += 4)
		xgene_pcie_writel(port, i, 0);
}

static int xgene_pcie_setup(struct xgene_pcie_port *port)
{
	struct device *dev = port->dev;
	u32 val, lanes = 0, speed = 0;
	int ret;

	xgene_pcie_clear_config(port);

	/* setup the vendor and device IDs correctly */
	val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
	xgene_pcie_writel(port, BRIDGE_CFG_0, val);

	ret = xgene_pcie_map_ranges(port);
	if (ret)
		return ret;

	ret = xgene_pcie_parse_map_dma_ranges(port);
	if (ret)
		return ret;

	xgene_pcie_linkup(port, &lanes, &speed);
	if (!port->link_up)
		dev_info(dev, "(rc) link down\n");
	else
		dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
	return 0;
}

static struct pci_ops xgene_pcie_ops = {
	.map_bus = xgene_pcie_map_bus,
	.read = xgene_pcie_config_read32,
	.write = pci_generic_config_write32,
};

static int xgene_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node;
	struct xgene_pcie_port *port;
	struct pci_host_bridge *bridge;
	int ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENOMEM;

	port = pci_host_bridge_priv(bridge);

	port->node = of_node_get(dn);
	port->dev = dev;

	port->version = XGENE_PCIE_IP_VER_UNKN;
	if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
		port->version = XGENE_PCIE_IP_VER_1;

	ret = xgene_pcie_map_reg(port, pdev);
	if (ret)
		return ret;

	ret = xgene_pcie_init_port(port);
	if (ret)
		return ret;

	ret = xgene_pcie_setup(port);
	if (ret)
		return ret;

	bridge->sysdata = port;
	bridge->ops = &xgene_pcie_ops;

	return pci_host_probe(bridge);
}

static const struct of_device_id xgene_pcie_match_table[] = {
	{.compatible = "apm,xgene-pcie",},
	{},
};

static struct platform_driver xgene_pcie_driver = {
	.driver = {
		.name = "xgene-pcie",
		.of_match_table = of_match_ptr(xgene_pcie_match_table),
		.suppress_bind_attrs = true,
	},
	.probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
#endif