Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5 Plus boards

// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
 *
 * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"

/*
 * PCIe unit register offsets.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_CAP_PCIEXP		0x0060
#define PCIE_HEADER_LOG_4_OFF	0x0128
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
#define PCIE_CONF_ADDR_OFF	0x18f8
#define  PCIE_CONF_ADDR_EN		0x80000000
#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
#define  PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
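/*
 * Configuration accesses are indirect: the address assembled by
 * PCIE_CONF_ADDR() is written to PCIE_CONF_ADDR_OFF and the payload is
 * then transferred through PCIE_CONF_DATA_OFF (see
 * mvebu_pcie_hw_rd_conf() and mvebu_pcie_hw_wr_conf() below).
 */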
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_MASK_OFF		0x1910
#define  PCIE_MASK_ENABLE_INTS          0x0f000000
#define PCIE_CTRL_OFF		0x1a00
#define  PCIE_CTRL_X1_MODE		0x0001
#define PCIE_STAT_OFF		0x1a04
#define  PCIE_STAT_BUS                  0xff00
#define  PCIE_STAT_DEV                  0x1f0000
#define  PCIE_STAT_LINK_DOWN		BIT(0)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL         0x1a60
#define  PCIE_DEBUG_SOFT_RESET		BIT(20)

struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;
	struct resource io;
	struct resource realio;
	struct resource mem;
	struct resource busn;
	int nports;
};

struct mvebu_pcie_window {
	phys_addr_t base;
	phys_addr_t remap;
	size_t size;
};

/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;
	u32 port;
	u32 lane;
	int devfn;
	unsigned int mem_target;
	unsigned int mem_attr;
	unsigned int io_target;
	unsigned int io_attr;
	struct clk *clk;
	struct gpio_desc *reset_gpio;
	char *reset_name;
	struct pci_bridge_emul bridge;
	struct device_node *dn;
	struct mvebu_pcie *pcie;
	struct mvebu_pcie_window memwin;
	struct mvebu_pcie_window iowin;
	u32 saved_pcie_stat;
	struct resource regs;
};

static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}

static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}

static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}

static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}

static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_BUS;
	stat |= nr << 8;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_DEV;
	stat |= nr << 16;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0] -> internal registers (needed for MSI)
 * BAR[1] -> covers all DRAM banks
 * BAR[2] -> Disabled
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);

	/* Setup windows for DDR banks.  Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}

static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 cmd, mask;

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/* Master + slave enable. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd |= PCI_COMMAND_IO;
	cmd |= PCI_COMMAND_MEMORY;
	cmd |= PCI_COMMAND_MASTER;
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/* Enable interrupt lines A-D. */
	mask = mvebu_readl(port, PCIE_MASK_OFF);
	mask |= PCIE_MASK_ENABLE_INTS;
	mvebu_writel(port, mask, PCIE_MASK_OFF);
}

static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Remove windows, starting from the largest ones to the smallest
 * ones.
 */
static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
				   phys_addr_t base, size_t size)
{
	while (size) {
		size_t sz = 1 << (fls(size) - 1);

		mvebu_mbus_del_window(base, sz);
		base += sz;
		size -= sz;
	}
}

/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 */
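/*
 * For example, an 11 MB (0x00b00000) bridge window is covered by three
 * MBus windows of 8 MB, 2 MB and 1 MB, allocated in that order.
 */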
static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				   unsigned int target, unsigned int attribute,
				   phys_addr_t base, size_t size,
				   phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}
}

static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  const struct mvebu_pcie_window *desired,
				  struct mvebu_pcie_window *cur)
{
	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	if (desired->size == 0)
		return;

	mvebu_pcie_add_windows(port, target, attribute, desired->base,
			       desired->size, desired->remap);
	*cur = *desired;
}

static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    conf->iolimitupper < conf->iobaseupper ||
	    !(conf->command & PCI_COMMAND_IO)) {
		mvebu_pcie_set_window(port, port->io_target, port->io_attr,
				      &desired, &port->iowin);
		return;
	}

	if (!mvebu_has_ioport(port)) {
		dev_WARN(&port->pcie->pdev->dev,
			 "Attempt to set IO when IO is disabled\n");
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin.base
	 * is the CPU address.
	 */
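	/*
	 * Example: iobase = 0xf1 and iobaseupper = 0 decode to bus I/O
	 * address 0xf000; with iolimit = 0xf1 and iolimitupper = 0 the
	 * resulting window is 4 KB.
	 */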
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(conf->iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (conf->iolimitupper << 16)) -
			desired.remap) +
		       1;

	mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
			      &port->iowin);
}

static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (conf->memlimit < conf->membase ||
	    !(conf->command & PCI_COMMAND_MEMORY)) {
		mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
				      &desired, &port->memwin);
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
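	/*
	 * Example: membase = 0xffe0 and memlimit = 0xffe0 decode to a
	 * single 1 MB window starting at 0xffe00000.
	 */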
	desired.base = ((conf->membase & 0xFFF0) << 16);
	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
			      &port->memwin);
}

static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
				 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
				   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires the clock power management capability to be
		 * hard-wired to zero for downstream ports
		 */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			 ~PCI_EXP_LNKCAP_CLKPM;
		break;

	case PCI_EXP_LNKCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		*value = PCI_EXP_SLTSTA_PDS << 16;
		break;

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}

static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
	{
		if (!mvebu_has_ioport(port))
			conf->command &= ~PCI_COMMAND_IO;

		if ((old ^ new) & PCI_COMMAND_IO)
			mvebu_pcie_handle_iobase_change(port);
		if ((old ^ new) & PCI_COMMAND_MEMORY)
			mvebu_pcie_handle_membase_change(port);

		break;
	}

	case PCI_IO_BASE:
		/*
		 * We keep bit 1 set, it is a read-only bit that
		 * indicates we support 32 bits addressing for the
		 * I/O
		 */
		conf->iobase |= PCI_IO_RANGE_TYPE_32;
		conf->iolimit |= PCI_IO_RANGE_TYPE_32;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_MEMORY_BASE:
		mvebu_pcie_handle_membase_change(port);
		break;

	case PCI_IO_BASE_UPPER16:
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_PRIMARY_BUS:
		mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	default:
		break;
	}
}

static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		/*
		 * Armada370 data says these bits must always
		 * be zero when in root complex mode.
		 */
		new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			 PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * If we don't support CLKREQ, we must ensure that the
		 * CLKREQ enable bit always reads zero.  Since we haven't
		 * had this capability, and it's dependent on board wiring,
		 * disable it for the time being.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_RTSTA:
		mvebu_writel(port, new, PCIE_RC_RTSTA);
		break;
	}
}

static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	struct pci_bridge_emul *bridge = &port->bridge;
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
	bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
	bridge->conf.class_revision =
		mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	}

	/*
	 * Older mvebu hardware provides PCIe Capability structure only in
	 * version 1. New hardware provides it in version 2.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);

	bridge->has_pcie = true;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
}

static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn)
{
	int i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		if (bus->number == 0 && port->devfn == devfn)
			return port;
		if (bus->number != 0 &&
		    bus->number >= port->bridge.conf.secondary_bus &&
		    bus->number <= port->bridge.conf.subordinate_bus)
			return port;
	}

	return NULL;
}

/* PCI configuration space write function */
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			      int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	int ret;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Access the emulated PCI-to-PCI bridge */
	if (bus->number == 0)
		return pci_bridge_emul_conf_write(&port->bridge, where,
						  size, val);

	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Access the real PCIe interface */
	ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
				    where, size, val);

	return ret;
}

/* PCI configuration space read function */
static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			      int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	int ret;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Access the emulated PCI-to-PCI bridge */
	if (bus->number == 0)
		return pci_bridge_emul_conf_read(&port->bridge, where,
						 size, val);

	if (!mvebu_pcie_link_up(port)) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Access the real PCIe interface */
	ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
				    where, size, val);

	return ret;
}

static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};

static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	if (dev->bus->number != 0)
		return start;

	/*
	 * On the PCI-to-PCI bridge side, the I/O windows must have at
	 * least a 64 KB size and the memory windows must have at
	 * least a 1 MB size. Moreover, MBus windows need to have a
	 * base address aligned on their size, and their size must be
	 * a power of two. This means that if the BAR doesn't have a
	 * power of two size, several MBus windows will actually be
	 * created. We need to ensure that the biggest MBus window
	 * (which will be the first one) is aligned on its size, which
	 * explains the rounddown_pow_of_two() being done here.
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}

static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
					      struct device_node *np,
					      struct mvebu_pcie_port *port)
{
	int ret = 0;

	ret = of_address_to_resource(np, 0, &port->regs);
	if (ret)
		return (void __iomem *)ERR_PTR(ret);

	return devm_ioremap_resource(&pdev->dev, &port->regs);
}

#define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
#define    DT_TYPE_IO                 0x1
#define    DT_TYPE_MEM32              0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
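/*
 * In the controller's "ranges" property, the parent (CPU) address of
 * each entry encodes the MBus target ID in bits [63:56] and the window
 * attribute in bits [55:48]; mvebu_get_tgt_attr() below extracts them
 * for the range matching a port's slot and resource type.
 */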
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			      unsigned long type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			      unsigned int *tgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			      unsigned int *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	const int na = 3, ns = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	const __be32 *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	int rlen, nranges, rangesz, pna, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	*tgt = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	*attr = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	range = of_get_property(np, "ranges", &rlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	if (!range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	pna = of_n_addr_cells(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	rangesz = pna + na + ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	nranges = rlen / sizeof(__be32) / rangesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	for (i = 0; i < nranges; i++, range += rangesz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		u32 flags = of_read_number(range, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		u32 slot = of_read_number(range + 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		u64 cpuaddr = of_read_number(range + na, pna);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		unsigned long rtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			rtype = IORESOURCE_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			rtype = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		if (slot == PCI_SLOT(devfn) && type == rtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) #ifdef CONFIG_PM_SLEEP
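/*
 * Across system sleep only the per-port status register is preserved:
 * PCIE_STAT_OFF holds, among other things, the root port's bus and
 * device numbers.  The rest of the port setup is simply redone by
 * mvebu_pcie_setup_hw() in the resume path.
 */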
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) static int mvebu_pcie_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	struct mvebu_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	for (i = 0; i < pcie->nports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		struct mvebu_pcie_port *port = pcie->ports + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) static int mvebu_pcie_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	struct mvebu_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	for (i = 0; i < pcie->nports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		struct mvebu_pcie_port *port = pcie->ports + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		mvebu_pcie_setup_hw(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) static void mvebu_pcie_port_clk_put(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct mvebu_pcie_port *port = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	clk_put(port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
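/*
 * Parse one child node of the controller describing a root port.
 * Returns 1 when the port is usable, 0 when it should be skipped
 * (non-fatal problems such as a missing property or clock) and a
 * negative errno on fatal errors (-ENOMEM, -EPROBE_DEFER).
 *
 * A minimal, illustrative port node (property names follow the
 * marvell,armada-370-pcie binding; the exact values are board and SoC
 * specific) looks roughly like:
 *
 *   pcie@1,0 {
 *           reg = <0x0800 0 0 0 0>;     // devfn, see of_pci_get_devfn()
 *           marvell,pcie-port = <0>;
 *           marvell,pcie-lane = <0>;
 *           clocks = <&gateclk 5>;
 *           reset-gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;  // optional
 *   };
 */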
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct mvebu_pcie_port *port, struct device_node *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct device *dev = &pcie->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	enum of_gpio_flags flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	int reset_gpio, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	port->pcie = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			 child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		port->lane = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 				    port->lane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (!port->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	port->devfn = of_pci_get_devfn(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (port->devfn < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 				 &port->mem_target, &port->mem_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (resource_size(&pcie->io) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 				   &port->io_target, &port->io_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		port->io_target = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		port->io_attr = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (reset_gpio == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		ret = reset_gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (gpio_is_valid(reset_gpio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		unsigned long gpio_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 						  port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		if (!port->reset_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		if (flags & OF_GPIO_ACTIVE_LOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			dev_info(dev, "%pOF: reset gpio is active low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 				 child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			gpio_flags = GPIOF_ACTIVE_LOW |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 				     GPIOF_OUT_INIT_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			gpio_flags = GPIOF_OUT_INIT_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 					    port->reset_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		port->reset_gpio = gpio_to_desc(reset_gpio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	port->clk = of_clk_get_by_name(child, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (IS_ERR(port->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		dev_err(dev, "%s: cannot get clock\n", port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		clk_put(port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	/* In the case of skipping, we need to free these */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	devm_kfree(dev, port->reset_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	port->reset_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	devm_kfree(dev, port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	port->name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * Power up a PCIe port.  PCIe requires the refclk to be stable for 100µs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * prior to releasing PERST.  See table 2-4 in section 2.6.2 AC Specifications
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * of the PCI Express Card Electromechanical Specification, 1.1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	ret = clk_prepare_enable(port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (port->reset_gpio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		of_property_read_u32(port->dn, "reset-delay-us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				     &reset_udelay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
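		/*
		 * Release PERST# only after the 100us refclk settling
		 * time, then give the card "reset-delay-us" (100 ms, i.e.
		 * PCI_PM_D3COLD_WAIT, unless the DT overrides it) to come
		 * out of reset.
		 */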
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		gpiod_set_value_cansleep(port->reset_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		msleep(reset_udelay / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  * Power down a PCIe port.  Strictly, PCIe requires us to place the card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  * in D3hot state before asserting PERST#.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
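	/* gpiod_set_value_cansleep() is a no-op if reset_gpio is NULL. */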
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	gpiod_set_value_cansleep(port->reset_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	clk_disable_unprepare(port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  * so we need extra resource setup parsing our special DT properties encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  * the MEM and IO apertures.
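 *
 * Note that pcie->io is the CPU-side (mbus) I/O aperture, while
 * pcie->realio is the PCI I/O range (starting at PCIBIOS_MIN_IO) handed
 * to the PCI core; mvebu_pci_host_probe() later backs it with the CPU
 * aperture in 64K chunks via pci_ioremap_io().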
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct device *dev = &pcie->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	/* Get the PCIe memory aperture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (resource_size(&pcie->mem) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		dev_err(dev, "invalid memory aperture size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	pcie->mem.name = "PCI MEM";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	pci_add_resource(&bridge->windows, &pcie->mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/* Get the PCIe IO aperture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	mvebu_mbus_get_pcie_io_aperture(&pcie->io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	if (resource_size(&pcie->io) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		pcie->realio.flags = pcie->io.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		pcie->realio.start = PCIBIOS_MIN_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		pcie->realio.end = min_t(resource_size_t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 					 IO_SPACE_LIMIT - SZ_64K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 					 resource_size(&pcie->io) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		pcie->realio.name = "PCI I/O";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		pci_add_resource(&bridge->windows, &pcie->realio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * This is a copy of pci_host_probe(), except that it does the I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  * remap as the last step, once we are sure we won't fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * It should be removed once the I/O remap error handling issue has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * been sorted out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	struct mvebu_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct pci_bus *bus, *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	ret = pci_scan_root_bus_bridge(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		dev_err(bridge->dev.parent, "Scanning root bridge failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	pcie = pci_host_bridge_priv(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (resource_size(&pcie->io) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
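		/*
		 * Back the PCI I/O space with the physical I/O aperture,
		 * one 64K window at a time.
		 */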
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			pci_ioremap_io(i, pcie->io.start + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	bus = bridge->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 * We insert PCI resources into the iomem_resource and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 * ioport_resource trees in either pci_bus_claim_resources()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * or pci_bus_assign_resources().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	if (pci_has_flag(PCI_PROBE_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		pci_bus_claim_resources(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		pci_bus_size_bridges(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		pci_bus_assign_resources(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		list_for_each_entry(child, &bus->children, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			pcie_bus_configure_settings(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	pci_bus_add_devices(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
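/*
 * Probe flow: allocate the host bridge, request the MEM/IO apertures,
 * parse the per-port child nodes, then power up each usable port, map
 * its registers and initialise its emulated PCI-to-PCI bridge before
 * handing the bridge over to mvebu_pci_host_probe() for bus scanning.
 */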
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static int mvebu_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct mvebu_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct pci_host_bridge *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	struct device_node *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	int num, i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	pcie = pci_host_bridge_priv(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	pcie->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	platform_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	ret = mvebu_pcie_parse_request_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	num = of_get_available_child_count(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (!pcie->ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	for_each_available_child_of_node(np, child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		struct mvebu_pcie_port *port = &pcie->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		ret = mvebu_pcie_parse_port(pcie, port, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			of_node_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		} else if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		port->dn = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	pcie->nports = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	for (i = 0; i < pcie->nports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		struct mvebu_pcie_port *port = &pcie->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		child = port->dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		if (!child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		ret = mvebu_pcie_powerup(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		port->base = mvebu_pcie_map_registers(pdev, child, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		if (IS_ERR(port->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			dev_err(dev, "%s: cannot map registers\n", port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			port->base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			mvebu_pcie_powerdown(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		mvebu_pcie_setup_hw(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		mvebu_pcie_set_local_dev_nr(port, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		mvebu_pci_bridge_emul_init(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	pcie->nports = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	bridge->sysdata = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	bridge->ops = &mvebu_pcie_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	bridge->align_resource = mvebu_pcie_align_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	return mvebu_pci_host_probe(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static const struct of_device_id mvebu_pcie_of_match_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	{ .compatible = "marvell,armada-xp-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	{ .compatible = "marvell,armada-370-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	{ .compatible = "marvell,dove-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	{ .compatible = "marvell,kirkwood-pcie", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
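/*
 * Sleep ops run in the noirq phase, so the port registers are restored
 * before the drivers of the devices behind the ports resume.
 */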
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static const struct dev_pm_ops mvebu_pcie_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static struct platform_driver mvebu_pcie_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		.name = "mvebu-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		.of_match_table = mvebu_pcie_of_match_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		/* driver unloading/unbinding currently not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		.suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		.pm = &mvebu_pcie_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	.probe = mvebu_pcie_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) builtin_platform_driver(mvebu_pcie_driver);