Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * PCIe endpoint driver for Renesas R-Car SoCs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *  Copyright (c) 2020 Renesas Electronics Europe GmbH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/of_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/pci-epc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "pcie-rcar.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #define RCAR_EPC_MAX_FUNCTIONS		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
/* Structure representing the PCIe interface */
struct rcar_pcie_endpoint {
	struct rcar_pcie	pcie;		/* common R-Car PCIe state (regs, dev) */
	phys_addr_t		*ob_mapped_addr; /* CPU addr mapped per outbound window (0 = unmapped) */
	struct pci_epc_mem_window *ob_window;	/* outbound window table (base/size/page_size) */
	u8			max_functions;	/* clamped to RCAR_EPC_MAX_FUNCTIONS */
	unsigned int		bar_to_atu[MAX_NR_INBOUND_MAPS]; /* BAR number -> inbound window index */
	unsigned long		*ib_window_map;	/* bitmap of in-use inbound windows (used in pairs) */
	u32			num_ib_windows;	/* number of inbound windows */
	u32			num_ob_windows;	/* number of outbound windows */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
/* One-time controller setup for endpoint operation (register init sequence). */
static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
{
	u32 val;

	/* Disable the controller while reprogramming it. */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set endpoint mode */
	rcar_pci_write_reg(pcie, 0, PCIEMSR);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	/* Device/Port Type field (bits 7:4 of PCI_EXP_FLAGS) = Endpoint. */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_NORMAL);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	val = rcar_pci_read_reg(pcie, EXPCAP(1));
	/* device supports fixed 128 bytes MPSS */
	val &= ~GENMASK(2, 0);
	rcar_pci_write_reg(pcie, val, EXPCAP(1));

	val = rcar_pci_read_reg(pcie, EXPCAP(2));
	/* read requests size 128 bytes */
	val &= ~GENMASK(14, 12);
	/* payload size 128 bytes */
	val &= ~GENMASK(7, 5);
	rcar_pci_write_reg(pcie, val, EXPCAP(2));

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* flush modifications */
	wmb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 				   phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	for (i = 0; i < ep->num_ob_windows; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		if (ep->ob_window[i].phys_base == addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 					   struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	struct rcar_pcie *pcie = &ep->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	char outbound_name[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	ep->num_ob_windows = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		sprintf(outbound_name, "memory%u", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		res = platform_get_resource_byname(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 						   IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 						   outbound_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 			dev_err(pcie->dev, "missing outbound window %u\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		if (!devm_request_mem_region(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 					     resource_size(res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 					     outbound_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 			dev_err(pcie->dev, "Cannot request memory region %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 				outbound_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		ep->ob_window[i].phys_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		ep->ob_window[i].size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		/* controller doesn't support multiple allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 		 * from same window, so set page_size to window size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		ep->ob_window[i].page_size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	ep->num_ob_windows = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 				  struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	struct rcar_pcie *pcie = &ep->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	struct pci_epc_mem_window *window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	err = of_address_to_resource(dev->of_node, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	pcie->base = devm_ioremap_resource(dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	if (IS_ERR(pcie->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		return PTR_ERR(pcie->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 				     sizeof(*window), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	if (!ep->ob_window)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	rcar_pcie_parse_outbound_ranges(ep, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	err = of_property_read_u8(dev->of_node, "max-functions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 				  &ep->max_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 		ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
/*
 * Program the configuration-header identifiers from @hdr into the
 * controller's ID set registers. Only function 0 supplies the vendor and
 * subsystem-vendor IDs; other functions keep the already-programmed low
 * halves and OR in their own device/subsystem IDs. Interrupt pins other
 * than INTA are rejected.
 */
static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	/* Function 0 sets the vendor ID; others preserve the existing one. */
	if (!fn)
		val = hdr->vendorid;
	else
		val = rcar_pci_read_reg(pcie, IDSETR0);
	val |= hdr->deviceid << 16;
	rcar_pci_write_reg(pcie, val, IDSETR0);

	/* Class-code word: revid | progif<<8 | subclass<<16 | baseclass<<24 */
	val = hdr->revid;
	val |= hdr->progif_code << 8;
	val |= hdr->subclass_code << 16;
	val |= hdr->baseclass_code << 24;
	rcar_pci_write_reg(pcie, val, IDSETR1);

	/* Same scheme for the subsystem vendor/device IDs. */
	if (!fn)
		val = hdr->subsys_vendor_id;
	else
		val = rcar_pci_read_reg(pcie, SUBIDSETR);
	val |= hdr->subsys_id << 16;
	rcar_pci_write_reg(pcie, val, SUBIDSETR);

	/* Only INTA (or no pin at all) is accepted. */
	if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
		return -EINVAL;
	val = rcar_pci_read_reg(pcie, PCICONF(15));
	val |= (hdr->interrupt_pin << 8);
	rcar_pci_write_reg(pcie, val, PCICONF(15));

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 				struct pci_epf_bar *epf_bar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	u64 size = 1ULL << fls64(epf_bar->size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	dma_addr_t cpu_addr = epf_bar->phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	enum pci_barno bar = epf_bar->barno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	struct rcar_pcie *pcie = &ep->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	if (idx >= ep->num_ib_windows) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		dev_err(pcie->dev, "no free inbound window\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		flags |= IO_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	ep->bar_to_atu[bar] = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	/* use 64-bit BARs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	set_bit(idx, ep->ib_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	set_bit(idx + 1, ep->ib_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	if (cpu_addr > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 		unsigned long nr_zeros = __ffs64(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 		u64 alignment = 1ULL << nr_zeros;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 		size = min(size, alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	size = min(size, 1ULL << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	mask = roundup_pow_of_two(size) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	mask &= ~0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	rcar_pcie_set_inbound(pcie, cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 			      0x0, mask | flags, idx, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	err = rcar_pcie_wait_for_phyrdy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		dev_err(pcie->dev, "phy not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 				   struct pci_epf_bar *epf_bar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	enum pci_barno bar = epf_bar->barno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	u32 atu_index = ep->bar_to_atu[bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	clear_bit(atu_index, ep->ib_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	clear_bit(atu_index + 1, ep->ib_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	struct rcar_pcie *pcie = &ep->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	rcar_pci_write_reg(pcie, flags, MSICAP(fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	struct rcar_pcie *pcie = &ep->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	if (!(flags & MSICAP0_MSIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 				 phys_addr_t addr, u64 pci_addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	struct rcar_pcie *pcie = &ep->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	struct resource_entry win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	int window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	/* check if we have a link. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	err = rcar_pcie_wait_for_dl(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 		dev_err(pcie->dev, "link not up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	window = rcar_pcie_ep_get_window(ep, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	if (window < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 		dev_err(pcie->dev, "failed to get corresponding window\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	memset(&win, 0x0, sizeof(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	memset(&res, 0x0, sizeof(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	res.start = pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	res.end = pci_addr + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	res.flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	win.res = &res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	rcar_pcie_set_outbound(pcie, window, &win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	ep->ob_mapped_addr[window] = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 				    phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	struct resource_entry win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	for (idx = 0; idx < ep->num_ob_windows; idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		if (ep->ob_mapped_addr[idx] == addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	if (idx >= ep->num_ob_windows)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	memset(&win, 0x0, sizeof(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	memset(&res, 0x0, sizeof(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	win.res = &res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	rcar_pcie_set_outbound(&ep->pcie, idx, &win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	ep->ob_mapped_addr[idx] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
/*
 * Assert a legacy INTx interrupt by pulsing ASTINTX in PCIEINTXR for
 * about 1 ms. Fails when MSI is enabled, when INTx message transmission
 * is disabled (INTDIS), or when INTx is already asserted.
 */
static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
				    u8 fn, u8 intx)
{
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	/*
	 * NOTE(review): PCI_MSI_FLAGS_ENABLE is a config-space capability
	 * flag tested here against the PCIEMSITXR register - presumably the
	 * enable bit occupies the same position in that register; confirm
	 * against the hardware manual.
	 */
	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	if ((val & PCI_MSI_FLAGS_ENABLE)) {
		dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCICONF(1));
	if ((val & INTDIS)) {
		dev_err(pcie->dev, "INTx message transmission is disabled\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	if ((val & ASTINTX)) {
		dev_err(pcie->dev, "INTx is already asserted\n");
		return -EINVAL;
	}

	/* Assert, hold for ~1 ms, then deassert. */
	val |= ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);
	usleep_range(1000, 1001);
	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	val &= ~ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 				   u8 fn, u8 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	u16 msi_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	/* Check MSI enable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	val = rcar_pci_read_reg(pcie, MSICAP(fn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	if (!(val & MSICAP0_MSIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 	/* Get MSI numbers from MME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	msi_count = 1 << msi_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	if (!interrupt_num || interrupt_num > msi_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 				  enum pci_epc_irq_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 				  u16 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	case PCI_EPC_IRQ_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 		return rcar_pcie_ep_assert_intx(ep, fn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	case PCI_EPC_IRQ_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 		return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 
/* Start the endpoint controller: set MACCTLR and write CFINIT to PCIETCTLR. */
static int rcar_pcie_ep_start(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	/* Program the MAC control register with its defined initial value. */
	rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
	/* CFINIT in PCIETCTLR begins the configuration sequence. */
	rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 
/* Stop the endpoint controller by clearing PCIETCTLR (mirrors hw_init). */
static void rcar_pcie_ep_stop(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
/* Fixed feature set advertised to the endpoint framework for all functions. */
static const struct pci_epc_features rcar_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
	.reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
	/* Fixed BAR sizes in bytes for BAR0/BAR2/BAR4. */
	.bar_fixed_size[0] = 128,
	.bar_fixed_size[2] = 256,
	.bar_fixed_size[4] = 256,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) static const struct pci_epc_features*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	return &rcar_pcie_epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 
/*
 * Operations handed to the PCI endpoint controller framework via
 * devm_pci_epc_create(). Unset callbacks (e.g. set_msix/get_msix) are
 * left NULL because MSI-X is not supported (see rcar_pcie_epc_features).
 */
static const struct pci_epc_ops rcar_pcie_epc_ops = {
	.write_header	= rcar_pcie_ep_write_header,
	.set_bar	= rcar_pcie_ep_set_bar,
	.clear_bar	= rcar_pcie_ep_clear_bar,
	.set_msi	= rcar_pcie_ep_set_msi,
	.get_msi	= rcar_pcie_ep_get_msi,
	.map_addr	= rcar_pcie_ep_map_addr,
	.unmap_addr	= rcar_pcie_ep_unmap_addr,
	.raise_irq	= rcar_pcie_ep_raise_irq,
	.start		= rcar_pcie_ep_start,
	.stop		= rcar_pcie_ep_stop,
	.get_features	= rcar_pcie_ep_get_features,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 
/*
 * Device-tree match table. The SoC-specific RZ/G2E entry comes first;
 * the generic Gen3 compatible acts as a fallback. No MODULE_DEVICE_TABLE
 * is needed since this driver is builtin-only (builtin_platform_driver).
 */
static const struct of_device_id rcar_pcie_ep_of_match[] = {
	{ .compatible = "renesas,r8a774c0-pcie-ep", },
	{ .compatible = "renesas,rcar-gen3-pcie-ep" },
	{ },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 
/*
 * rcar_pcie_ep_probe() - bind the endpoint controller device.
 *
 * Ordering matters here: runtime PM must be enabled and the device
 * resumed before any register access (rcar_pcie_ep_hw_init), and the
 * outbound window count (ep->num_ob_windows, filled in by
 * rcar_pcie_ep_get_pdata) must be known before sizing ob_mapped_addr
 * and initializing the EPC memory windows.
 *
 * All allocations are devm-managed; only the runtime-PM reference and
 * enable state need explicit unwinding via the goto labels.
 *
 * Returns 0 on success or a negative errno.
 */
static int rcar_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_endpoint *ep;
	struct rcar_pcie *pcie;
	struct pci_epc *epc;
	int err;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->dev = dev;

	/* Power the controller up before touching any registers. */
	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	/* Map registers and parse outbound windows from DT resources. */
	err = rcar_pcie_ep_get_pdata(ep, pdev);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	/* Bitmap tracking which inbound address-translation units are used. */
	ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
	ep->ib_window_map =
			devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
				     sizeof(long), GFP_KERNEL);
	if (!ep->ib_window_map) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for inbound map\n");
		goto err_pm_put;
	}

	/* One CPU address slot per outbound window, for map/unmap lookups. */
	ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
					  sizeof(*ep->ob_mapped_addr),
					  GFP_KERNEL);
	if (!ep->ob_mapped_addr) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
		goto err_pm_put;
	}

	epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		err = PTR_ERR(epc);
		goto err_pm_put;
	}

	epc->max_functions = ep->max_functions;
	epc_set_drvdata(epc, ep);

	/* Basic controller setup; requires the device to be runtime-active. */
	rcar_pcie_ep_hw_init(pcie);

	/* Register the outbound windows as EPC address space. */
	err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
	if (err < 0) {
		dev_err(dev, "failed to initialize the epc memory space\n");
		goto err_pm_put;
	}

	return 0;

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 
/*
 * Builtin-only platform driver: no .remove callback is provided, so
 * unbinding via sysfs is suppressed (suppress_bind_attrs) to avoid
 * leaving the hardware in an inconsistent state.
 */
static struct platform_driver rcar_pcie_ep_driver = {
	.driver = {
		.name = "rcar-pcie-ep",
		.of_match_table = rcar_pcie_ep_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_ep_probe,
};
builtin_platform_driver(rcar_pcie_ep_driver);