Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * License.  See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (C) 2007, 2008, 2009, 2010, 2011 Cavium Networks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <asm/octeon/octeon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <asm/octeon/cvmx-npei-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <asm/octeon/cvmx-pciercx-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <asm/octeon/cvmx-pescx-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <asm/octeon/cvmx-pexp-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <asm/octeon/cvmx-pemx-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <asm/octeon/cvmx-dpi-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <asm/octeon/cvmx-sli-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <asm/octeon/cvmx-sriox-defs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <asm/octeon/cvmx-helper-errata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <asm/octeon/pci-octeon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #define MPS_CN5XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #define MPS_CN6XXX  0 /* 128 byte Max Packet Size (Limit of most PCs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) /* Module parameter to disable PCI probing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) static int pcie_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) module_param(pcie_disable, int, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) static int enable_pcie_14459_war;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) static int enable_pcie_bus_num_war[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
/*
 * Octeon XKPHYS physical-address layout used to generate PCIe accesses.
 * The three overlapping views build addresses for config, IO and memory
 * space requests respectively; did/subdid steer the access to the PCIe
 * block and es selects hardware endian swapping.
 */
union cvmx_pcie_address {
	uint64_t u64;
	/* View for PCIe configuration space requests (SubDID = 1). */
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;	/* 1 for IO space access */
		uint64_t did:5; /* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 1 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t es:2;	/* Endian swap = 1 */
		uint64_t port:2;	/* PCIe port 0,1 */
		uint64_t reserved_29_31:3;	/* Must be zero */
		/*
		 * Selects the type of the configuration request (0 = type 0,
		 * 1 = type 1).
		 */
		uint64_t ty:1;
		/* Target bus number sent in the ID in the request. */
		uint64_t bus:8;
		/*
		 * Target device number sent in the ID in the
		 * request. Note that Dev must be zero for type 0
		 * configuration requests.
		 */
		uint64_t dev:5;
		/* Target function number sent in the ID in the request. */
		uint64_t func:3;
		/*
		 * Selects a register in the configuration space of
		 * the target.
		 */
		uint64_t reg:12;
	} config;
	/* View for PCIe IO space requests (SubDID = 2). */
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;	/* 1 for IO space access */
		uint64_t did:5; /* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 2 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t es:2;	/* Endian swap = 1 */
		uint64_t port:2;	/* PCIe port 0,1 */
		uint64_t address:32;	/* PCIe IO address */
	} io;
	/* View for PCIe memory space requests (SubDID = 3-6). */
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;	/* 1 for IO space access */
		uint64_t did:5; /* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 3-6 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t address:36;	/* PCIe Mem address */
	} mem;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) static int cvmx_pcie_rc_initialize(int pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98)  * Return the Core virtual base address for PCIe IO access. IOs are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99)  * read/written as an offset from this address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  * @pcie_port: PCIe port the IO is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  * Returns 64bit Octeon IO base address for read/write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	union cvmx_pcie_address pcie_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	pcie_addr.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	pcie_addr.io.upper = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	pcie_addr.io.io = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	pcie_addr.io.did = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	pcie_addr.io.subdid = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	pcie_addr.io.es = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	pcie_addr.io.port = pcie_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	return pcie_addr.u64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @pcie_port: PCIe port the IO is for
 *
 * Returns Size of the IO window
 */
static inline uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
	/* Fixed 4GB IO window per port (32-bit io.address field) */
	return 0x100000000ull;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132)  * Return the Core virtual base address for PCIe MEM access. Memory is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133)  * read/written as an offset from this address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135)  * @pcie_port: PCIe port the IO is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137)  * Returns 64bit Octeon IO base address for read/write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	union cvmx_pcie_address pcie_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	pcie_addr.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	pcie_addr.mem.upper = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	pcie_addr.mem.io = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	pcie_addr.mem.did = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	pcie_addr.mem.subdid = 3 + pcie_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	return pcie_addr.u64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @pcie_port: PCIe port the IO is for
 *
 * Returns Size of the Mem window
 */
static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
	/* Fixed 64GB memory window per port (36-bit mem.address field) */
	return 0x1000000000ull;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164)  * Read a PCIe config space register indirectly. This is used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165)  * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167)  * @pcie_port:	PCIe port to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168)  * @cfg_offset: Address to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170)  * Returns Value read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		union cvmx_pescx_cfg_rd pescx_cfg_rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		pescx_cfg_rd.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 		pescx_cfg_rd.s.addr = cfg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 		return pescx_cfg_rd.s.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		union cvmx_pemx_cfg_rd pemx_cfg_rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		pemx_cfg_rd.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		pemx_cfg_rd.s.addr = cfg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 		cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 		return pemx_cfg_rd.s.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192)  * Write a PCIe config space register indirectly. This is used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193)  * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  * @pcie_port:	PCIe port to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196)  * @cfg_offset: Address to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197)  * @val:	Value to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 				 uint32_t val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 		union cvmx_pescx_cfg_wr pescx_cfg_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		pescx_cfg_wr.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 		pescx_cfg_wr.s.addr = cfg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		pescx_cfg_wr.s.data = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		union cvmx_pemx_cfg_wr pemx_cfg_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		pemx_cfg_wr.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		pemx_cfg_wr.s.addr = cfg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		pemx_cfg_wr.s.data = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  * Build a PCIe config space request address for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  * @pcie_port: PCIe port to access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  * @bus:       Sub bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  * @dev:       Device ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  * @fn:	       Device sub function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * @reg:       Register to access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * Returns 64bit Octeon IO address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 						     int dev, int fn, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	union cvmx_pcie_address pcie_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	union cvmx_pciercx_cfg006 pciercx_cfg006;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	pciercx_cfg006.u32 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	pcie_addr.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	pcie_addr.config.upper = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	pcie_addr.config.io = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	pcie_addr.config.did = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	pcie_addr.config.subdid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	pcie_addr.config.es = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	pcie_addr.config.port = pcie_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	pcie_addr.config.bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	pcie_addr.config.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	pcie_addr.config.func = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	pcie_addr.config.reg = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	return pcie_addr.u64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
/**
 * Read 8bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 *
 * Returns Result of the read (all-ones if the target cannot exist)
 */
static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
				      int fn, int reg)
{
	uint64_t addr =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);

	if (!addr)
		return 0xff;
	return cvmx_read64_uint8(addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
/**
 * Read 16bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 *
 * Returns Result of the read (all-ones if the target cannot exist)
 */
static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
					int fn, int reg)
{
	uint64_t addr =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);

	if (!addr)
		return 0xffff;
	/* Config space is little-endian on the wire */
	return le16_to_cpu(cvmx_read64_uint16(addr));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
/**
 * Read 32bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 *
 * Returns Result of the read (all-ones if the target cannot exist)
 */
static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
					int fn, int reg)
{
	uint64_t addr =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);

	if (!addr)
		return 0xffffffff;
	/* Config space is little-endian on the wire */
	return le32_to_cpu(cvmx_read64_uint32(addr));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
/**
 * Write 8bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 * @val:       Value to write
 */
static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
				    int reg, uint8_t val)
{
	uint64_t addr =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);

	/* Silently drop writes to targets that cannot exist */
	if (!addr)
		return;
	cvmx_write64_uint8(addr, val);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
/**
 * Write 16bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 * @val:       Value to write
 */
static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
				     int reg, uint16_t val)
{
	uint64_t addr =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);

	/* Silently drop writes to targets that cannot exist */
	if (!addr)
		return;
	/* Config space is little-endian on the wire */
	cvmx_write64_uint16(addr, cpu_to_le16(val));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
/**
 * Write 32bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 * @val:       Value to write
 */
static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn,
				     int reg, uint32_t val)
{
	uint64_t addr =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);

	/* Silently drop writes to targets that cannot exist */
	if (!addr)
		return;
	/* Config space is little-endian on the wire */
	cvmx_write64_uint32(addr, cpu_to_le32(val));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378)  * Initialize the RC config space CSRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380)  * @pcie_port: PCIe port to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	union cvmx_pciercx_cfg030 pciercx_cfg030;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	union cvmx_pciercx_cfg070 pciercx_cfg070;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	union cvmx_pciercx_cfg001 pciercx_cfg001;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	union cvmx_pciercx_cfg032 pciercx_cfg032;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	union cvmx_pciercx_cfg006 pciercx_cfg006;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	union cvmx_pciercx_cfg008 pciercx_cfg008;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	union cvmx_pciercx_cfg009 pciercx_cfg009;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	union cvmx_pciercx_cfg010 pciercx_cfg010;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	union cvmx_pciercx_cfg011 pciercx_cfg011;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	union cvmx_pciercx_cfg035 pciercx_cfg035;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	union cvmx_pciercx_cfg075 pciercx_cfg075;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	union cvmx_pciercx_cfg034 pciercx_cfg034;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	/* Max Payload Size (PCIE*_CFG030[MPS]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	/* Max Read Request Size (PCIE*_CFG030[MRRS]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	/* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	/* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		pciercx_cfg030.s.mps = MPS_CN5XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		pciercx_cfg030.s.mps = MPS_CN6XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	 * Enable relaxed order processing. This will allow devices to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	 * affect read response ordering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	pciercx_cfg030.s.ro_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	/* Enable no snoop processing. Not used by Octeon */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	pciercx_cfg030.s.ns_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	/* Correctable error reporting enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	pciercx_cfg030.s.ce_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	/* Non-fatal error reporting enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	pciercx_cfg030.s.nfe_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	/* Fatal error reporting enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	pciercx_cfg030.s.fe_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	/* Unsupported request reporting enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	pciercx_cfg030.s.ur_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		union cvmx_npei_ctl_status2 npei_ctl_status2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		 * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		 * PCIE*_CFG030[MPS].  Max Read Request Size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 		 * (NPEI_CTL_STATUS2[MRRS]) must not exceed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		 * PCIE*_CFG030[MRRS]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		/* Max payload size = 128 bytes for best Octeon DMA performance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		npei_ctl_status2.s.mps = MPS_CN5XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		/* Max read request size = 128 bytes for best Octeon DMA performance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		npei_ctl_status2.s.mrrs = MRRS_CN5XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		if (pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 			npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		 * Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		 * PCIE*_CFG030[MPS].  Max Read Request Size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		 * (DPI_SLI_PRTX_CFG[MRRS]) must not exceed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		 * PCIE*_CFG030[MRRS].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		union cvmx_dpi_sli_prtx_cfg prt_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		union cvmx_sli_s2m_portx_ctl sli_s2m_portx_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		prt_cfg.s.mps = MPS_CN6XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		prt_cfg.s.mrrs = MRRS_CN6XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		/* Max outstanding load request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		prt_cfg.s.molr = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	/* ECRC Generation (PCIE*_CFG070[GE,CE]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	pciercx_cfg070.s.ge = 1;	/* ECRC generation enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	pciercx_cfg070.s.ce = 1;	/* ECRC check enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	 * Access Enables (PCIE*_CFG001[MSAE,ME])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	 * ME and MSAE should always be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	 * Interrupt Disable (PCIE*_CFG001[I_DIS])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	 * System Error Message Enable (PCIE*_CFG001[SEE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	pciercx_cfg001.s.msae = 1;	/* Memory space enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	pciercx_cfg001.s.me = 1;	/* Bus master enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	pciercx_cfg001.s.i_dis = 1;	/* INTx assertion disable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	pciercx_cfg001.s.see = 1;	/* SERR# enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* Advanced Error Recovery Message Enables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	/* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	/* Use CVMX_PCIERCX_CFG067 hardware default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	/* Active State Power Management (PCIE*_CFG032[ASLPC]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	 * Link Width Mode (PCIERCn_CFG452[LME]) - Set during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	 * cvmx_pcie_rc_initialize_link()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	 * Primary Bus Number (PCIERCn_CFG006[PBNUM])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	 * We set the primary bus number to 1 so IDT bridges are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	 * happy. They don't like zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	pciercx_cfg006.u32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	pciercx_cfg006.s.pbnum = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	pciercx_cfg006.s.sbnum = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	pciercx_cfg006.s.subbnum = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	 * Memory-mapped I/O BAR (PCIERCn_CFG008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	 * Most applications should disable the memory-mapped I/O BAR by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	 * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	pciercx_cfg008.u32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	pciercx_cfg008.s.mb_addr = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	pciercx_cfg008.s.ml_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	 * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	 * Most applications should disable the prefetchable BAR by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	 * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	 * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	pciercx_cfg009.s.lmem_base = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	pciercx_cfg009.s.lmem_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	pciercx_cfg010.s.umem_base = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	pciercx_cfg011.s.umem_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	 * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	 * PME Interrupt Enables (PCIERCn_CFG035[PMEIE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	 * Advanced Error Recovery Interrupt Enables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	 * (PCIERCn_CFG075[CERE,NFERE,FERE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	 * HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	 * PCIERCn_CFG034[DLLS_EN,CCINT_EN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577)  * port from reset to a link up state. Software can then begin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578)  * configuring the rest of the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580)  * @pcie_port: PCIe port to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582)  * Returns Zero on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	uint64_t start_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	union cvmx_pescx_ctl_status pescx_ctl_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	union cvmx_pciercx_cfg452 pciercx_cfg452;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	union cvmx_pciercx_cfg032 pciercx_cfg032;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	union cvmx_pciercx_cfg448 pciercx_cfg448;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	/* Set the lane width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	if (pescx_ctl_status.s.qlm_cfg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		/* We're in 8 lane (56XX) or 4 lane (54XX) mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		pciercx_cfg452.s.lme = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		/* We're in 4 lane (56XX) or 2 lane (52XX) mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		pciercx_cfg452.s.lme = 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	 * CN52XX pass 1.x has an errata where length mismatches on UR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	 * responses can cause bus errors on 64bit memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	 * reads. Turning off length error checking fixes this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		union cvmx_pciercx_cfg455 pciercx_cfg455;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		pciercx_cfg455.s.m_cpl_len_err = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	/* Lane swap needs to be manually enabled for CN52XX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		pescx_ctl_status.s.lane_swp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	/* Bring up the link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	pescx_ctl_status.s.lnk_enb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	 * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	 * be disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	/* Wait for the link to come up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	start_cycle = cvmx_get_cycle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		if (cvmx_get_cycle() - start_cycle > 2 * octeon_get_clock_rate()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 			cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		__delay(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	} while (pciercx_cfg032.s.dlla == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	/* Clear all pending errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	 * Update the Replay Time Limit. Empirically, some PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	 * devices take a little longer to respond than expected under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	 * load. As a workaround for this we configure the Replay Time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	 * Limit to the value expected for a 512 byte MPS instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	 * our actual 256 byte MPS. The numbers below are directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	 * from the PCIe spec table 3-4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	switch (pciercx_cfg032.s.nlw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	case 1:		/* 1 lane */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		pciercx_cfg448.s.rtl = 1677;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	case 2:		/* 2 lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		pciercx_cfg448.s.rtl = 867;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	case 4:		/* 4 lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		pciercx_cfg448.s.rtl = 462;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	case 8:		/* 8 lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		pciercx_cfg448.s.rtl = 258;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		pmas->cn68xx.ba++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		pmas->s.ba++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684)  * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685)  * enumerate the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687)  * @pcie_port: PCIe port to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689)  * Returns Zero on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	int base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	u64 addr_swizzle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	union cvmx_ciu_soft_prst ciu_soft_prst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	union cvmx_pescx_bist_status pescx_bist_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	union cvmx_pescx_bist_status2 pescx_bist_status2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	union cvmx_npei_ctl_status npei_ctl_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	union cvmx_npei_mem_access_ctl npei_mem_access_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	union cvmx_npei_mem_access_subidx mem_access_subid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	union cvmx_npei_dbg_data npei_dbg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	union cvmx_pescx_ctl_status2 pescx_ctl_status2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	union cvmx_pciercx_cfg032 pciercx_cfg032;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	union cvmx_npei_bar1_indexx bar1_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	 * Make sure we aren't trying to setup a target mode interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	 * in host mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	 * Make sure a CN52XX isn't trying to bring up port 1 when it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	 * is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	 * PCIe switch arbitration mode. '0' == fixed priority NPEI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * PCIe0, then PCIe1. '1' == round robin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	npei_ctl_status.s.arb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	/* Allow up to 0x20 config retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	npei_ctl_status.s.cfg_rtry = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	 * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	 * don't reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		npei_ctl_status.s.p0_ntags = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		npei_ctl_status.s.p1_ntags = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	/* Bring the PCIe out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		 * The EBH5200 board swapped the PCIe reset lines on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		 * the board. As a workaround for this bug, we bring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		 * both PCIe ports out of reset at the same time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		 * instead of on separate calls. So for port 0, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		 * bring both out of reset and do nothing on port 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		if (pcie_port == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			 * After a chip reset the PCIe will also be in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			 * reset. If it isn't, most likely someone is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 			 * trying to init it again without a proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			 * PCIe reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			if (ciu_soft_prst.s.soft_prst == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 				/* Reset the ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 				ciu_soft_prst.s.soft_prst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 				cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 				ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 				ciu_soft_prst.s.soft_prst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 				cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 				/* Wait until pcie resets the ports. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 				udelay(2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			ciu_soft_prst.s.soft_prst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			ciu_soft_prst.s.soft_prst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		 * The normal case: The PCIe ports are completely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		 * separate and can be brought out of reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		 * independently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		if (pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		 * After a chip reset the PCIe will also be in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		 * reset. If it isn't, most likely someone is trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		 * to init it again without a proper PCIe reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		if (ciu_soft_prst.s.soft_prst == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			/* Reset the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			ciu_soft_prst.s.soft_prst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			if (pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 				cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 				cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			/* Wait until pcie resets the ports. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			udelay(2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		if (pcie_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			ciu_soft_prst.s.soft_prst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			ciu_soft_prst.s.soft_prst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	 * Wait for PCIe reset to complete. Due to errata PCIE-700, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 * fixed number of cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	__delay(400000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	 * PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	 * CN56XX and CN52XX, so we only probe it on newer chips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		/* Clear PCLK_RUN so we can check if the clock is running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		pescx_ctl_status2.s.pclk_run = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		/* Now that we cleared PCLK_RUN, wait for it to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		 * again telling us the clock is running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 					  union cvmx_pescx_ctl_status2, pclk_run, ==, 1, 10000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	 * Check and make sure PCIe came out of reset. If it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	 * the board probably hasn't wired the clocks up and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * interface should be skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (pescx_ctl_status2.s.pcierst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 * Check BIST2 status. If any bits are set skip this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	 * interface. This is an attempt to catch PCIE-813 on pass 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	 * parts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (pescx_bist_status2.u64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			     pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	/* Check BIST status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (pescx_bist_status.u64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			     pcie_port, CAST64(pescx_bist_status.u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	/* Initialize the config space CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	__cvmx_pcie_rc_initialize_config_space(pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	/* Bring the link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			     pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	/* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	npei_mem_access_ctl.s.max_word = 0;	/* Allow 16 words to combine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	npei_mem_access_ctl.s.timer = 127;	/* Wait up to 127 cycles for more data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	/* Setup Mem access SubDIDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	mem_access_subid.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	mem_access_subid.s.nmerge = 1;	/* Due to an errata on pass 1 chips, no merging is allowed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	mem_access_subid.s.esr = 1;	/* Endian-swap for Reads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	mem_access_subid.s.esw = 1;	/* Endian-swap for Writes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	mem_access_subid.s.nsr = 0;	/* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	mem_access_subid.s.nsw = 0;	/* Enable Snoop for Writes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	mem_access_subid.s.ror = 0;	/* Disable Relaxed Ordering for Reads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	mem_access_subid.s.row = 0;	/* Disable Relaxed Ordering for Writes. */
	mem_access_subid.s.ba = 0;	/* PCIe Address Bits <63:34>. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	 * Setup mem access 12-15 for port 0, 16-19 for port 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 * supplying 36 bits of address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	 * Disable the peer to peer forwarding register. This must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	 * setup by the OS after it enumerates the bus and assigns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	 * addresses to the PCIe busses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	/* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	/* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	bar1_index.u32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	bar1_index.s.ca = 1;	   /* Not Cached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	bar1_index.s.end_swp = 1;  /* Endian Swap mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	bar1_index.s.addr_v = 1;   /* Valid entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	base = pcie_port ? 16 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	/* Big endian swizzle for 32-bit PEXP_NCB register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) #ifdef __MIPSEB__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	addr_swizzle = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	addr_swizzle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 				    bar1_index.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		base++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		/* 256MB / 16 >> 22 == 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	 * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	 * precedence where they overlap. It also overlaps with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 * device addresses, so make sure the peer to peer forwarding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 * is set right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	 * Setup BAR2 attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	 * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * - PTLP_RO,CTLP_RO should normally be set (except for debug).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 * - WAIT_COM=0 will likely work for all applications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	 * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if (pcie_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		union cvmx_npei_ctl_port1 npei_ctl_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		npei_ctl_port.s.bar2_enb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		npei_ctl_port.s.bar2_esx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		npei_ctl_port.s.bar2_cax = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		npei_ctl_port.s.ptlp_ro = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		npei_ctl_port.s.ctlp_ro = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		npei_ctl_port.s.wait_com = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		npei_ctl_port.s.waitl_com = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		union cvmx_npei_ctl_port0 npei_ctl_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		npei_ctl_port.s.bar2_enb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		npei_ctl_port.s.bar2_esx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		npei_ctl_port.s.bar2_cax = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		npei_ctl_port.s.ptlp_ro = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		npei_ctl_port.s.ctlp_ro = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		npei_ctl_port.s.wait_com = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		npei_ctl_port.s.waitl_com = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	 * Both pass 1 and pass 2 of CN52XX and CN56XX have an errata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	 * that causes TLP ordering to not be preserved after multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	 * PCIe port resets. This code detects this fault and corrects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	 * it by aligning the TLP counters properly. Another link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 * reset is then performed. See PCIE-13340
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		union cvmx_npei_dbg_data dbg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		int old_in_fif_p_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		int in_fif_p_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		int out_p_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		 * Choose a write address of 1MB. It should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		 * harmless as all bars haven't been setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		 * Make sure at least in_p_offset have been executed before we try and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		 * read in_fif_p_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		i = in_p_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			cvmx_write64_uint32(write_address, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			__delay(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		 * Read the IN_FIF_P_COUNT from the debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		 * select. IN_FIF_P_COUNT can be unstable sometimes so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		 * read it twice with a write between the reads.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		 * way we can tell the value is good as it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		 * increment by one due to the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			old_in_fif_p_count = dbg_data.s.data & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			cvmx_write64_uint32(write_address, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			__delay(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			in_fif_p_count = dbg_data.s.data & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		} while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
		/* Update in_fif_p_count for its offset with respect to out_p_count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		/* Read the OUT_P_COUNT from the debug select */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		out_p_count = (dbg_data.s.data>>1) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		/* Check that the two counters are aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		if (out_p_count != in_fif_p_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			while (in_fif_p_count != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				cvmx_write64_uint32(write_address, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				__delay(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 				in_fif_p_count = (in_fif_p_count + 1) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			 * The EBH5200 board swapped the PCIe reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			 * lines on the board. This means we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			 * bring both links down and up, which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			 * cause the PCIe0 to need alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			 * again. Lots of messages will be displayed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			 * but everything should work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				(pcie_port == 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				cvmx_pcie_rc_initialize(0);
			/* Retry bringing this port up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/* Display the link status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /**
 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * port from reset to a link up state. Software can then begin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * configuring the rest of the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  * @pcie_port: PCIe port to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  *
 * Returns Zero on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	uint64_t start_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	union cvmx_pemx_ctl_status pem_ctl_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	union cvmx_pciercx_cfg032 pciercx_cfg032;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	union cvmx_pciercx_cfg448 pciercx_cfg448;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	/* Bring up the link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	pem_ctl_status.s.lnk_enb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	/* Wait for the link to come up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	start_cycle = cvmx_get_cycle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		if (cvmx_get_cycle() - start_cycle >  octeon_get_clock_rate())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		__delay(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	} while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	 * Update the Replay Time Limit. Empirically, some PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	 * devices take a little longer to respond than expected under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	 * load. As a workaround for this we configure the Replay Time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	 * Limit to the value expected for a 512 byte MPS instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	 * our actual 256 byte MPS. The numbers below are directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	 * from the PCIe spec table 3-4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	switch (pciercx_cfg032.s.nlw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	case 1: /* 1 lane */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		pciercx_cfg448.s.rtl = 1677;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	case 2: /* 2 lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		pciercx_cfg448.s.rtl = 867;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	case 4: /* 4 lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		pciercx_cfg448.s.rtl = 462;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	case 8: /* 8 lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		pciercx_cfg448.s.rtl = 258;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * @pcie_port: PCIe port to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * Returns Zero on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	union cvmx_ciu_soft_prst ciu_soft_prst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	union cvmx_mio_rst_ctlx mio_rst_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	union cvmx_pemx_bar_ctl pemx_bar_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	union cvmx_pemx_ctl_status pemx_ctl_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	union cvmx_pemx_bist_status pemx_bist_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	union cvmx_pemx_bist_status2 pemx_bist_status2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	union cvmx_pciercx_cfg032 pciercx_cfg032;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	union cvmx_pciercx_cfg515 pciercx_cfg515;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	union cvmx_sli_ctl_portx sli_ctl_portx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	union cvmx_sli_mem_access_ctl sli_mem_access_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	union cvmx_sli_mem_access_subidx mem_access_subid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	union cvmx_sriox_status_reg sriox_status_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	union cvmx_pemx_bar1_indexx bar1_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (octeon_has_feature(OCTEON_FEATURE_SRIO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		/* Make sure this interface isn't SRIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			 * The CN66XX requires reading the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			 * MIO_QLMX_CFG register to figure out the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			 * port type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			union cvmx_mio_qlmx_cfg qlmx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			if (qlmx_cfg.s.qlm_spd == 15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 				pr_notice("PCIe: Port %d is disabled, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 			switch (qlmx_cfg.s.qlm_spd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			case 0x1: /* SRIO 1x4 short */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			case 0x3: /* SRIO 1x4 long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			case 0x4: /* SRIO 2x2 short */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			case 0x6: /* SRIO 2x2 long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 				pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			case 0x9: /* SGMII */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 				pr_notice("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			case 0xb: /* XAUI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 				pr_notice("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			case 0x0: /* PCIE gen2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			case 0x8: /* PCIE gen2 (alias) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			case 0x2: /* PCIE gen1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			case 0xa: /* PCIE gen1 (alias) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 				pr_notice("PCIe: Port %d is unknown, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			if (sriox_status_reg.s.srio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)     /* This code is so that the PCIe analyzer is able to see 63XX traffic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	pr_notice("PCIE : init for pcie analyzer.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	cvmx_helper_qlm_jtag_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	cvmx_helper_qlm_jtag_update(pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	/* Make sure we aren't trying to setup a target mode interface in host mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (!mio_rst_ctl.s.host_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		pr_notice("PCIe: Port %d in endpoint mode.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	/* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		if (pcie_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			union cvmx_ciu_qlm ciu_qlm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 			ciu_qlm.s.txbypass = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			ciu_qlm.s.txdeemph = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			ciu_qlm.s.txmargin = 0x17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			union cvmx_ciu_qlm ciu_qlm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 			ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			ciu_qlm.s.txbypass = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			ciu_qlm.s.txdeemph = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			ciu_qlm.s.txmargin = 0x17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	/* Bring the PCIe out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	 * After a chip reset the PCIe will also be in reset. If it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	 * isn't, most likely someone is trying to init it again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	 * without a proper PCIe reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (ciu_soft_prst.s.soft_prst == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		/* Reset the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		ciu_soft_prst.s.soft_prst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		if (pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		/* Wait until pcie resets the ports. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		udelay(2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (pcie_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		ciu_soft_prst.s.soft_prst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		ciu_soft_prst.s.soft_prst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	/* Wait for PCIe reset to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * Check and make sure PCIe came out of reset. If it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	 * the board probably hasn't wired the clocks up and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	 * interface should be skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), union cvmx_mio_rst_ctlx, rst_done, ==, 1, 10000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		pr_notice("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	/* Check BIST status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (pemx_bist_status.u64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		pr_notice("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	/* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		pemx_bist_status2.u64 &= ~0x3full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	if (pemx_bist_status2.u64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		pr_notice("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	/* Initialize the config space CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	__cvmx_pcie_rc_initialize_config_space(pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	/* Enable gen2 speed selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	pciercx_cfg515.s.dsc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	/* Bring the link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		 * Some gen1 devices don't handle the gen 2 training
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		 * correctly. Disable gen2 and try again with only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		 * gen1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		union cvmx_pciercx_cfg031 pciercx_cfg031;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		pciercx_cfg031.s.mls = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			pr_notice("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	/* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	sli_mem_access_ctl.s.max_word = 0;	/* Allow 16 words to combine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	sli_mem_access_ctl.s.timer = 127;	/* Wait up to 127 cycles for more data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	/* Setup Mem access SubDIDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	mem_access_subid.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	mem_access_subid.s.nmerge = 0;	/* Allow merging as it works on CN6XXX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	mem_access_subid.s.esr = 1;	/* Endian-swap for Reads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	mem_access_subid.s.esw = 1;	/* Endian-swap for Writes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	mem_access_subid.s.wtype = 0;	/* "No snoop" and "Relaxed ordering" are not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	mem_access_subid.s.rtype = 0;	/* "No snoop" and "Relaxed ordering" are not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	/* PCIe Address Bits <63:34>. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		mem_access_subid.cn68xx.ba = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		mem_access_subid.s.ba = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	 * Setup mem access 12-15 for port 0, 16-19 for port 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	 * supplying 36 bits of address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		/* Set each SUBID to extend the addressable range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		__cvmx_increment_ba(&mem_access_subid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	 * Disable the peer to peer forwarding register. This must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	 * setup by the OS after it enumerates the bus and assigns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	 * addresses to the PCIe busses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	/* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	 * Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	 * precedence where they overlap. It also overlaps with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	 * device addresses, so make sure the peer to peer forwarding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	 * is set right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	 * Setup BAR2 attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	 * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	 * - PTLP_RO,CTLP_RO should normally be set (except for debug).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	 * - WAIT_COM=0 will likely work for all applications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	 * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	pemx_bar_ctl.s.bar2_enb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	pemx_bar_ctl.s.bar2_esx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	pemx_bar_ctl.s.bar2_cax = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	sli_ctl_portx.s.ptlp_ro = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	sli_ctl_portx.s.ctlp_ro = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	sli_ctl_portx.s.wait_com = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	sli_ctl_portx.s.waitl_com = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	/* BAR1 follows BAR2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	bar1_index.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	bar1_index.s.ca = 1;	   /* Not Cached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	bar1_index.s.end_swp = 1;  /* Endian Swap mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	bar1_index.s.addr_v = 1;   /* Valid entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		/* 256MB / 16 >> 22 == 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	 * Allow config retries for 250ms. Count is based off the 5Ghz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	 * SERDES clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	/* Display the link status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	pr_notice("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  * @pcie_port: PCIe port to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * Returns Zero on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static int cvmx_pcie_rc_initialize(int pcie_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (octeon_has_feature(OCTEON_FEATURE_NPEI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /* Above was cvmx-pcie.c, below original pcie.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  * Map a PCI device to the appropriate interrupt line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  * @dev:    The Linux PCI device structure for the device to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)  * @slot:   The slot number for this device on __BUS 0__. Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)  *		 enumerates through all the bridges and figures out the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)  *		 slot on Bus 0 where this device eventually hooks to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)  * @pin:    The PCI interrupt pin read from the device, then swizzled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)  *		 as it goes through each bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)  * Returns Interrupt number for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	 * The EBH5600 board with the PCI to PCIe bridge mistakenly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	 * wires the first slot for both device id 2 and interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	 * A. According to the PCI spec, device id 2 should be C. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	 * following kludge attempts to fix this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	if (strstr(octeon_board_type_string(), "EBH5600") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	    dev->bus && dev->bus->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		 * Iterate all the way up the device chain and find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		 * the root bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		while (dev->bus && dev->bus->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			dev = to_pci_dev(dev->bus->bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		 * If the root bus is number 0 and the PEX 8114 is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		 * root, assume we are behind the miswired bus. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		 * need to correct the swizzle level by two. Yuck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		if ((dev->bus->number == 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		    (dev->vendor == 0x10b5) && (dev->device == 0x8114)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			 * The pin field is one based, not zero. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			 * need to swizzle it by minus two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			pin = ((pin - 3) & 3) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	 * The -1 is because pin starts with one, not zero. It might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	 * be that this equation needs to include the slot number, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	 * I don't have hardware to check that against.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	return pin - 1 + OCTEON_IRQ_PCI_INT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static	void set_cfg_read_retry(u32 retry_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	union cvmx_pemx_ctl_status pemx_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	pemx_ctl.s.cfg_rtry = retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static u32 disable_cfg_read_retry(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	u32 retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	union cvmx_pemx_ctl_status pemx_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	retry_cnt =  pemx_ctl.s.cfg_rtry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	pemx_ctl.s.cfg_rtry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	return retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static int is_cfg_retry(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	union cvmx_pemx_int_sum pemx_int_sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	pemx_int_sum.u64 = cvmx_read_csr(CVMX_PEMX_INT_SUM(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (pemx_int_sum.s.crs_dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  * Read a value from configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static int octeon_pcie_read_config(unsigned int pcie_port, struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 				   unsigned int devfn, int reg, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 				   u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	union octeon_cvmemctl cvmmemctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	union octeon_cvmemctl cvmmemctl_save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	int bus_number = bus->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	int cfg_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	int retry_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	int max_retry_cnt = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	u32 cfg_retry_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	cvmmemctl_save.u64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	 * For the top level bus make sure our hardware bus number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	 * matches the software one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	if (bus->parent == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		if (enable_pcie_bus_num_war[pcie_port])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			bus_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 			union cvmx_pciercx_cfg006 pciercx_cfg006;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 			pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 					     CVMX_PCIERCX_CFG006(pcie_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 			if (pciercx_cfg006.s.pbnum != bus_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 				pciercx_cfg006.s.pbnum = bus_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 				pciercx_cfg006.s.sbnum = bus_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 				pciercx_cfg006.s.subbnum = bus_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 				cvmx_pcie_cfgx_write(pcie_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 					    CVMX_PCIERCX_CFG006(pcie_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 					    pciercx_cfg006.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	 * PCIe only has a single device connected to Octeon. It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	 * always device ID 0. Don't bother doing reads for other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	 * device IDs on the first segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	if ((bus->parent == NULL) && (devfn >> 3 != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	 * The following is a workaround for the CN57XX, CN56XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	 * CN55XX, and CN54XX errata with PCIe config reads from non
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	 * existent devices.  These chips will hang the PCIe link if a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	 * config read is performed that causes a UR response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		 * For our EBH5600 board, port 0 has a bridge with two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		 * PCI-X slots. We need a new special checks to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		 * sure we only probe valid stuff.  The PCIe->PCI-X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		 * bridge only responds to device ID 0, function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		 * 0-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		if ((bus->parent == NULL) && (devfn >= 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		 * The PCI-X slots are device ID 2,3. Choose one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		 * the below "if" blocks based on what is plugged into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		 * the board.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		/* Use this option if you aren't using either slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		if (bus_number == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) #elif 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		 * Use this option if you are using the first slot but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		 * not the second.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		if ((bus_number == 2) && (devfn >> 3 != 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) #elif 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		 * Use this option if you are using the second slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		 * but not the first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		if ((bus_number == 2) && (devfn >> 3 != 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) #elif 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		/* Use this opion if you are using both slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		if ((bus_number == 2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		    !((devfn == (2 << 3)) || (devfn == (3 << 3))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		/* The following #if gives a more complicated example. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		   the required checks for running a Nitrox CN16XX-NHBX in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		   slot of the EBH5600. This card has a PLX PCIe bridge with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		   four Nitrox PLX parts behind it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		/* PLX bridge with 4 ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		if ((bus_number == 4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		    !((devfn >> 3 >= 1) && (devfn >> 3 <= 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		/* Nitrox behind PLX 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		if ((bus_number == 5) && (devfn >> 3 != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		/* Nitrox behind PLX 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		if ((bus_number == 6) && (devfn >> 3 != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		/* Nitrox behind PLX 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		if ((bus_number == 7) && (devfn >> 3 != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		/* Nitrox behind PLX 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		if ((bus_number == 8) && (devfn >> 3 != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		 * Shorten the DID timeout so bus errors for PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		 * config reads from non existent devices happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 * faster. This allows us to continue booting even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		 * the above "if" checks are wrong.  Once one of these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		 * errors happens, the PCIe port is dead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		cvmmemctl.u64 = cvmmemctl_save.u64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		cvmmemctl.s.didtto = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		__write_64bit_c0_register($11, 7, cvmmemctl.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		cfg_retry_cnt = disable_cfg_read_retry();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	pr_debug("pcie_cfg_rd port=%d b=%d devfn=0x%03x reg=0x%03x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		 " size=%d ", pcie_port, bus_number, devfn, reg, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			*val = cvmx_pcie_config_read32(pcie_port, bus_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 				devfn >> 3, devfn & 0x7, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			*val = cvmx_pcie_config_read16(pcie_port, bus_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 				devfn >> 3, devfn & 0x7, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			*val = cvmx_pcie_config_read8(pcie_port, bus_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 				devfn >> 3, devfn & 0x7, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			if (OCTEON_IS_MODEL(OCTEON_CN63XX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 				set_cfg_read_retry(cfg_retry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			(enable_pcie_14459_war)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			cfg_retry = is_cfg_retry();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			retry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			if (retry_cnt > max_retry_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 				pr_err(" pcie cfg_read retries failed. retry_cnt=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 				       retry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 				cfg_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	} while (cfg_retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		set_cfg_read_retry(cfg_retry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	pr_debug("val=%08x  : tries=%02d\n", *val, retry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		write_c0_cvmmemctl(cvmmemctl_save.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				    int reg, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	return octeon_pcie_read_config(0, bus, devfn, reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 				    int reg, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	return octeon_pcie_read_config(1, bus, devfn, reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) static int octeon_dummy_read_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 				    int reg, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)  * Write a value to PCI configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 				    unsigned int devfn, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 				    int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	int bus_number = bus->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if ((bus->parent == NULL) && (enable_pcie_bus_num_war[pcie_port]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		bus_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	pr_debug("pcie_cfg_wr port=%d b=%d devfn=0x%03x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		 " reg=0x%03x size=%d val=%08x\n", pcie_port, bus_number, devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		 reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 					 devfn & 0x7, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 					 devfn & 0x7, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 					devfn & 0x7, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 				     int reg, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	return octeon_pcie_write_config(0, bus, devfn, reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 				     int reg, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 				     int reg, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	return PCIBIOS_FUNC_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) static struct pci_ops octeon_pcie0_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	.read	= octeon_pcie0_read_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	.write	= octeon_pcie0_write_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) static struct resource octeon_pcie0_mem_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	.name = "Octeon PCIe0 MEM",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	.flags = IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static struct resource octeon_pcie0_io_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	.name = "Octeon PCIe0 IO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	.flags = IORESOURCE_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static struct pci_controller octeon_pcie0_controller = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	.pci_ops = &octeon_pcie0_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	.mem_resource = &octeon_pcie0_mem_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	.io_resource = &octeon_pcie0_io_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) static struct pci_ops octeon_pcie1_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	.read	= octeon_pcie1_read_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	.write	= octeon_pcie1_write_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) static struct resource octeon_pcie1_mem_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	.name = "Octeon PCIe1 MEM",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	.flags = IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static struct resource octeon_pcie1_io_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	.name = "Octeon PCIe1 IO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	.flags = IORESOURCE_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static struct pci_controller octeon_pcie1_controller = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	.pci_ops = &octeon_pcie1_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	.mem_resource = &octeon_pcie1_mem_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	.io_resource = &octeon_pcie1_io_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) static struct pci_ops octeon_dummy_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	.read	= octeon_dummy_read_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	.write	= octeon_dummy_write_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static struct resource octeon_dummy_mem_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	.name = "Virtual PCIe MEM",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	.flags = IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) static struct resource octeon_dummy_io_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	.name = "Virtual PCIe IO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	.flags = IORESOURCE_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) static struct pci_controller octeon_dummy_controller = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	.pci_ops = &octeon_dummy_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	.mem_resource = &octeon_dummy_mem_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	.io_resource = &octeon_dummy_io_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) static int device_needs_bus_num_war(uint32_t deviceid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) #define IDT_VENDOR_ID 0x111d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	if ((deviceid  & 0xffff) == IDT_VENDOR_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)  * Initialize the Octeon PCIe controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)  * Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static int __init octeon_pcie_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	int host_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	int srio_war15205 = 0, port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	union cvmx_sli_ctl_portx sli_ctl_portx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	union cvmx_sriox_status_reg sriox_status_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	/* These chips don't have PCIe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	if (!octeon_has_feature(OCTEON_FEATURE_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	/* No PCIe simulation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	if (octeon_is_simulation())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	/* Disable PCI if instructed on the command line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	if (pcie_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	/* Point pcibios_map_irq() to the PCIe version of it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	 * PCIe I/O range. It is based on port 0 but includes up until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	 * port 1's end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	ioport_resource.start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	ioport_resource.end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		cvmx_pcie_get_io_base_address(1) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	 * Create a dummy PCIe controller to swallow up bus 0. IDT bridges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	 * don't work if the primary bus number is zero. Here we add a fake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	 * PCIe controller that the kernel will give bus 0. This allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	 * us to not change the normal kernel bus enumeration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	octeon_dummy_controller.io_map_base = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	octeon_dummy_controller.mem_resource->start = (1ull<<48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	octeon_dummy_controller.mem_resource->end = (1ull<<48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	register_pci_controller(&octeon_dummy_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		union cvmx_npei_ctl_status npei_ctl_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		host_mode = npei_ctl_status.s.host_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		union cvmx_mio_rst_ctlx mio_rst_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		host_mode = mio_rst_ctl.s.host_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	if (host_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		pr_notice("PCIe: Initializing port 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			if (sriox_status_reg.s.srio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 				srio_war15205 += 1;	 /* Port is SRIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 				port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		result = cvmx_pcie_rc_initialize(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		if (result == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 			uint32_t device0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			/* Memory offsets are physical addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			octeon_pcie0_controller.mem_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 				cvmx_pcie_get_mem_base_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			/* IO offsets are Mips virtual addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 			octeon_pcie0_controller.io_map_base =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 				CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 						(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			octeon_pcie0_controller.io_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			 * To keep things similar to PCI, we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 			 * device addresses at the same place as PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			 * uisng big bar support. This normally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			 * translates to 4GB-256MB, which is the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 			 * as most x86 PCs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			octeon_pcie0_controller.mem_resource->start =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 				cvmx_pcie_get_mem_base_address(0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 				(4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 			octeon_pcie0_controller.mem_resource->end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 				cvmx_pcie_get_mem_base_address(0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 				cvmx_pcie_get_mem_size(0) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 			 * Ports must be above 16KB for the ISA bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 			 * filtering in the PCI-X to PCI bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 			octeon_pcie0_controller.io_resource->start = 4 << 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 			octeon_pcie0_controller.io_resource->end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 				cvmx_pcie_get_io_size(0) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 			msleep(100); /* Some devices need extra time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 			register_pci_controller(&octeon_pcie0_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			device0 = cvmx_pcie_config_read32(0, 0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			enable_pcie_bus_num_war[0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 				device_needs_bus_num_war(device0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			srio_war15205 += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		host_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		/* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 			union cvmx_npei_dbg_data dbg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 			if (dbg_data.cn52xx.qlm0_link_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 				host_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		union cvmx_mio_rst_ctlx mio_rst_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		host_mode = mio_rst_ctl.s.host_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (host_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		pr_notice("PCIe: Initializing port 1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			if (sriox_status_reg.s.srio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 				srio_war15205 += 1;	 /* Port is SRIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 				port = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		result = cvmx_pcie_rc_initialize(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		if (result == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			uint32_t device0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			/* Memory offsets are physical addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 			octeon_pcie1_controller.mem_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 				cvmx_pcie_get_mem_base_address(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			 * To calculate the address for accessing the 2nd PCIe device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			 * either 'io_map_base' (pci_iomap()), or 'mips_io_port_base'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 			 * (ioport_map()) value is added to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 			 * pci_resource_start(dev,bar)). The 'mips_io_port_base' is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			 * only once based on first PCIe. Also changing 'io_map_base'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			 * based on first slot's value so that both the routines will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			 * work properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			octeon_pcie1_controller.io_map_base =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 				CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			/* IO offsets are Mips virtual addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			octeon_pcie1_controller.io_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 				cvmx_pcie_get_io_base_address(1) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 				cvmx_pcie_get_io_base_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			 * To keep things similar to PCI, we start device
			 * addresses at the same place as PCI using big bar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 			 * support. This normally translates to 4GB-256MB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			 * which is the same as most x86 PCs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 			octeon_pcie1_controller.mem_resource->start =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 				cvmx_pcie_get_mem_base_address(1) + (4ul << 30) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 				(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			octeon_pcie1_controller.mem_resource->end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 				cvmx_pcie_get_mem_base_address(1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 				cvmx_pcie_get_mem_size(1) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 			 * Ports must be above 16KB for the ISA bus filtering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 			 * in the PCI-X to PCI bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 			octeon_pcie1_controller.io_resource->start =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 				cvmx_pcie_get_io_base_address(1) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 				cvmx_pcie_get_io_base_address(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			octeon_pcie1_controller.io_resource->end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 				octeon_pcie1_controller.io_resource->start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 				cvmx_pcie_get_io_size(1) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 			msleep(100); /* Some devices need extra time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 			register_pci_controller(&octeon_pcie1_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			device0 = cvmx_pcie_config_read32(1, 0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			enable_pcie_bus_num_war[1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 				device_needs_bus_num_war(device0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		pr_notice("PCIe: Port 1 not in root complex mode, skipping.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		/* CN63XX pass 1_x/2.0 errata PCIe-15205  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 			srio_war15205 += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			port = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	 * CN63XX pass 1_x/2.0 errata PCIe-15205 requires setting all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	 * of SRIO MACs SLI_CTL_PORT*[INT*_MAP] to similar value and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	 * all of PCIe Macs SLI_CTL_PORT*[INT*_MAP] to different value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	 * from the previous set values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		if (srio_war15205 == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 			sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 			sli_ctl_portx.s.inta_map = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 			sli_ctl_portx.s.intb_map = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 			sli_ctl_portx.s.intc_map = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 			sli_ctl_portx.s.intd_map = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 			cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(port), sli_ctl_portx.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 			sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(!port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			sli_ctl_portx.s.inta_map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 			sli_ctl_portx.s.intb_map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			sli_ctl_portx.s.intc_map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			sli_ctl_portx.s.intd_map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 			cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(!port), sli_ctl_portx.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	octeon_pci_dma_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
/* Run PCIe root-complex setup during the arch_initcall phase of boot. */
arch_initcall(octeon_pcie_setup);