Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Synopsys DesignWare PCIe Endpoint controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2017 Texas Instruments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Author: Kishon Vijay Abraham I <kishon@ti.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include "pcie-designware.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/pci-epc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/pci-epf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include "../../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	struct pci_epc *epc = ep->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	pci_epc_linkup(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	struct pci_epc *epc = ep->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	pci_epc_init_notify(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) struct dw_pcie_ep_func *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	struct dw_pcie_ep_func *ep_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	list_for_each_entry(ep_func, &ep->func_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		if (ep_func->func_no == func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 			return ep_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	if (ep->ops->func_conf_select)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 		func_offset = ep->ops->func_conf_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	return func_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 				   enum pci_barno bar, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	struct dw_pcie_ep *ep = &pci->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	func_offset = dw_pcie_ep_func_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	dw_pcie_dbi_ro_wr_en(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	dw_pcie_writel_dbi2(pci, reg, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	dw_pcie_writel_dbi(pci, reg, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		dw_pcie_writel_dbi(pci, reg + 4, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	dw_pcie_dbi_ro_wr_dis(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	u8 func_no, funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	funcs = pci->ep.epc->max_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	for (func_no = 0; func_no < funcs; func_no++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		__dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		u8 cap_ptr, u8 cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	u8 cap_id, next_cap_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	u16 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	if (!cap_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	func_offset = dw_pcie_ep_func_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	cap_id = (reg & 0x00ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	if (cap_id > PCI_CAP_ID_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	if (cap_id == cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		return cap_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	next_cap_ptr = (reg & 0xff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	u8 next_cap_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	u16 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	func_offset = dw_pcie_ep_func_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	next_cap_ptr = (reg & 0x00ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 				   struct pci_epf_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	func_offset = dw_pcie_ep_func_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	dw_pcie_dbi_ro_wr_en(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 			   hdr->subclass_code | hdr->baseclass_code << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 			   hdr->cache_line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 			   hdr->subsys_vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 			   hdr->interrupt_pin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	dw_pcie_dbi_ro_wr_dis(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 				  enum pci_barno bar, dma_addr_t cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 				  enum dw_pcie_as_type as_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	u32 free_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	if (free_win >= ep->num_ib_windows) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		dev_err(pci->dev, "No free inbound window\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 				       as_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		dev_err(pci->dev, "Failed to program IB window\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	ep->bar_to_atu[bar] = free_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	set_bit(free_win, ep->ib_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 				   phys_addr_t phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 				   u64 pci_addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	u32 free_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	if (free_win >= ep->num_ob_windows) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		dev_err(pci->dev, "No free outbound window\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 				     phys_addr, pci_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	set_bit(free_win, ep->ob_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	ep->outbound_addr[free_win] = phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 				 struct pci_epf_bar *epf_bar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	enum pci_barno bar = epf_bar->barno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	u32 atu_index = ep->bar_to_atu[bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	clear_bit(atu_index, ep->ib_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	ep->epf_bar[bar] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
/*
 * pci_epc_ops::set_bar - back @epf_bar with local memory.
 *
 * Programs an inbound iATU window so host accesses to the BAR reach
 * epf_bar->phys_addr, then writes the BAR size mask (via the dbi2
 * view) and the BAR flags (via the dbi view). For a 64-bit BAR the
 * upper half occupies the next BAR slot. Returns 0 or a negative
 * errno from the inbound-window setup.
 */
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
			      struct pci_epf_bar *epf_bar)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	enum dw_pcie_as_type as_type;
	u32 reg;
	unsigned int func_offset = 0;

	func_offset = dw_pcie_ep_func_select(ep, func_no);

	reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;

	/* PCI_BASE_ADDRESS_SPACE bit clear = memory BAR, set = I/O BAR. */
	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		as_type = DW_PCIE_AS_MEM;
	else
		as_type = DW_PCIE_AS_IO;

	ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
				     epf_bar->phys_addr, as_type);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);

	/* dbi2 write sets the BAR mask (size - 1); dbi write sets flags. */
	dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
	dw_pcie_writel_dbi(pci, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		/* Upper 32 bits of a 64-bit BAR live in the next slot. */
		dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
		dw_pcie_writel_dbi(pci, reg + 4, 0);
	}

	ep->epf_bar[bar] = epf_bar;
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 			      u32 *atu_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	for (index = 0; index < ep->num_ob_windows; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		if (ep->outbound_addr[index] != addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 		*atu_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 				  phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	u32 atu_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	ret = dw_pcie_find_index(ep, addr, &atu_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	clear_bit(atu_index, ep->ob_window_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 			       phys_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 			       u64 pci_addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		dev_err(pci->dev, "Failed to enable address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	u32 val, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	struct dw_pcie_ep_func *ep_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	if (!ep_func || !ep_func->msi_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	func_offset = dw_pcie_ep_func_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	val = dw_pcie_readw_dbi(pci, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	if (!(val & PCI_MSI_FLAGS_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	u32 val, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	struct dw_pcie_ep_func *ep_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	if (!ep_func || !ep_func->msi_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	func_offset = dw_pcie_ep_func_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	val = dw_pcie_readw_dbi(pci, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	val &= ~PCI_MSI_FLAGS_QMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	dw_pcie_dbi_ro_wr_en(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	dw_pcie_writew_dbi(pci, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	dw_pcie_dbi_ro_wr_dis(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	u32 val, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	unsigned int func_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	struct dw_pcie_ep_func *ep_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	if (!ep_func || !ep_func->msix_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	func_offset = dw_pcie_ep_func_select(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	val = dw_pcie_readw_dbi(pci, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	if (!(val & PCI_MSIX_FLAGS_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	val &= PCI_MSIX_FLAGS_QSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 
/*
 * pci_epc_ops::set_msix - configure @func_no's MSI-X capability:
 * table size (PCI_MSIX_FLAGS_QSIZE), and the BAR/offset locations of
 * the MSI-X table and PBA. All writes happen inside a single
 * dbi ro_wr_en/ro_wr_dis window. Returns 0, or -EINVAL when the
 * function has no MSI-X capability.
 */
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
			       enum pci_barno bir, u32 offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 val, reg;
	unsigned int func_offset = 0;
	struct dw_pcie_ep_func *ep_func;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	dw_pcie_dbi_ro_wr_en(pci);

	func_offset = dw_pcie_ep_func_select(ep, func_no);

	/* Program the table-size field of the MSI-X flags word. */
	reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
	val = dw_pcie_readw_dbi(pci, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	dw_pcie_writew_dbi(pci, reg, val);

	/* MSI-X table location: BAR indicator in low bits, offset above. */
	reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
	val = offset | bir;
	dw_pcie_writel_dbi(pci, reg, val);

	/* PBA placed immediately after the table entries in the same BAR. */
	reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	dw_pcie_writel_dbi(pci, reg, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 				enum pci_epc_irq_type type, u16 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	if (!ep->ops->raise_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) static void dw_pcie_ep_stop(struct pci_epc *epc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	if (!pci->ops->stop_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	pci->ops->stop_link(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) static int dw_pcie_ep_start(struct pci_epc *epc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	if (!pci->ops->start_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	return pci->ops->start_link(pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) static const struct pci_epc_features*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	if (!ep->ops->get_features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	return ep->ops->get_features(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
/* EPC operations the DWC endpoint driver registers with the EPC core. */
static const struct pci_epc_ops epc_ops = {
	.write_header		= dw_pcie_ep_write_header,
	.set_bar		= dw_pcie_ep_set_bar,
	.clear_bar		= dw_pcie_ep_clear_bar,
	.map_addr		= dw_pcie_ep_map_addr,
	.unmap_addr		= dw_pcie_ep_unmap_addr,
	.set_msi		= dw_pcie_ep_set_msi,
	.get_msi		= dw_pcie_ep_get_msi,
	.set_msix		= dw_pcie_ep_set_msix,
	.get_msix		= dw_pcie_ep_get_msix,
	.raise_irq		= dw_pcie_ep_raise_irq,
	.start			= dw_pcie_ep_start,
	.stop			= dw_pcie_ep_stop,
	.get_features		= dw_pcie_ep_get_features,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	dev_err(dev, "EP cannot trigger legacy IRQs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 
/*
 * dw_pcie_ep_raise_msi_irq - generate an MSI towards the host
 * @ep: the DWC endpoint
 * @func_no: physical function whose MSI capability is used
 * @interrupt_num: 1-based MSI vector to raise
 *
 * Reads the message address/data the host programmed into the function's
 * MSI capability registers, maps that address through the outbound window
 * reserved at init time (ep->msi_mem), and performs the MMIO write that
 * triggers the interrupt.
 *
 * Returns 0 on success or a negative error code.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	unsigned int aligned_offset;
	unsigned int func_offset = 0;
	u16 msg_ctrl, msg_data;
	u32 msg_addr_lower, msg_addr_upper, reg;
	u64 msg_addr;
	bool has_upper;
	int ret;

	/* The function must exist and expose an MSI capability. */
	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	func_offset = dw_pcie_ep_func_select(ep, func_no);

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
	msg_ctrl = dw_pcie_readw_dbi(pci, reg);
	/* 64-bit capable functions keep the data word at a different offset. */
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
	msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
	if (has_upper) {
		reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
		msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
		reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
		msg_data = dw_pcie_readw_dbi(pci, reg);
	} else {
		msg_addr_upper = 0;
		reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
		msg_data = dw_pcie_readw_dbi(pci, reg);
	}
	/*
	 * The outbound window is page aligned; split the target into an
	 * aligned base (mapped below) and the offset written through. Note
	 * lower & ~(lower & mask) == lower & ~mask, so msg_addr is the
	 * page-aligned base address.
	 */
	aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
	msg_addr = ((u64)msg_addr_upper) << 32 |
			(msg_addr_lower & ~aligned_offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
				  epc->mem->window.page_size);
	if (ret)
		return ret;

	/* Vector numbers are encoded in the low bits of the message data. */
	writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);

	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 				       u16 interrupt_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	struct dw_pcie_ep_func *ep_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	u32 msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	if (!ep_func || !ep_func->msix_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		   (interrupt_num - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 
/*
 * dw_pcie_ep_raise_msix_irq - generate an MSI-X interrupt towards the host
 * @ep: the DWC endpoint
 * @func_no: physical function whose MSI-X capability is used
 * @interrupt_num: 1-based MSI-X vector to raise
 *
 * Locates the function's MSI-X table in the BAR memory the host configured,
 * reads the entry for @interrupt_num, and - unless the entry is masked -
 * maps the message address through the reserved outbound window and writes
 * the message data to trigger the interrupt.
 *
 * Returns 0 on success, -EINVAL on a missing capability, -EPERM when the
 * vector is masked, or the error from mapping the address.
 */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
			      u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	struct pci_epf_msix_tbl *msix_tbl;
	struct pci_epc *epc = ep->epc;
	unsigned int func_offset = 0;
	u32 reg, msg_data, vec_ctrl;
	unsigned int aligned_offset;
	u32 tbl_offset;
	u64 msg_addr;
	int ret;
	u8 bir;

	/* The function must exist and expose an MSI-X capability. */
	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	func_offset = dw_pcie_ep_func_select(ep, func_no);

	/* The table register encodes both the BAR index and the offset. */
	reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
	tbl_offset = dw_pcie_readl_dbi(pci, reg);
	bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	/* Read the host-programmed entry out of local BAR memory. */
	msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
	vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;

	/* A masked vector must not be raised. */
	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
		return -EPERM;
	}

	/*
	 * Map a page-sized window at msg_addr and write the data through it
	 * at the sub-page offset. NOTE(review): unlike the MSI path above,
	 * msg_addr passed to map_addr is not masked down to the page base
	 * here - presumably the iATU aligns it; confirm against the MSI path.
	 */
	aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,  msg_addr,
				  epc->mem->window.page_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem + aligned_offset);

	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	struct pci_epc *epc = ep->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 			      epc->mem->window.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	pci_epc_mem_exit(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	u32 header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	int pos = PCI_CFG_SPACE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	while (pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 		header = dw_pcie_readl_dbi(pci, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 		if (PCI_EXT_CAP_ID(header) == cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 			return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		pos = PCI_EXT_CAP_NEXT(header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 		if (!pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 
/*
 * dw_pcie_ep_init_complete - finish core-dependent endpoint initialization
 * @ep: the DWC endpoint
 *
 * Verifies the controller is strapped for endpoint mode, clears the
 * advertised sizes in any Resizable BAR capability (so the host does not
 * resize BARs the EPF framework manages), and runs the common DWC setup.
 *
 * Returns 0 on success, -EIO when the header type is not endpoint (type 0).
 */
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	unsigned int offset;
	unsigned int nbars;
	u8 hdr_type;
	u32 reg;
	int i;

	/* A root port / switch header means the core is not in EP mode. */
	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
		   PCI_HEADER_TYPE_MASK;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev,
			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);

	/* The registers below are read-only unless DBI writes are enabled. */
	dw_pcie_dbi_ro_wr_en(pci);

	if (offset) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
			PCI_REBAR_CTRL_NBAR_SHIFT;

		/* Zero each BAR's resizable-size mask; entries are
		 * PCI_REBAR_CTRL (8 bytes) apart. */
		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
	}

	dw_pcie_setup(pci);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) int dw_pcie_ep_init(struct dw_pcie_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	u8 func_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	struct pci_epc *epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	struct device *dev = pci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	const struct pci_epc_features *epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	struct dw_pcie_ep_func *ep_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	INIT_LIST_HEAD(&ep->func_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	if (!pci->dbi_base || !pci->dbi_base2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		dev_err(dev, "Unable to read *num-ib-windows* property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	if (ep->num_ib_windows > MAX_IATU_IN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 		dev_err(dev, "Invalid *num-ib-windows*\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 		dev_err(dev, "Unable to read *num-ob-windows* property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	if (ep->num_ob_windows > MAX_IATU_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 		dev_err(dev, "Invalid *num-ob-windows*\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	ep->ib_window_map = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 					 BITS_TO_LONGS(ep->num_ib_windows),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 					 sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	if (!ep->ib_window_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	ep->ob_window_map = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 					 BITS_TO_LONGS(ep->num_ob_windows),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 					 sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	if (!ep->ob_window_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 			    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	ep->outbound_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	if (pci->link_gen < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 		pci->link_gen = of_pci_get_max_link_speed(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	epc = devm_pci_epc_create(dev, &epc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	if (IS_ERR(epc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 		dev_err(dev, "Failed to create epc device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 		return PTR_ERR(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	ep->epc = epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	epc_set_drvdata(epc, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 		epc->max_functions = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	for (func_no = 0; func_no < epc->max_functions; func_no++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 		if (!ep_func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 		ep_func->func_no = func_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 							      PCI_CAP_ID_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 							       PCI_CAP_ID_MSIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		list_add_tail(&ep_func->list, &ep->func_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	if (ep->ops->ep_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 		ep->ops->ep_init(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 			       ep->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 		dev_err(dev, "Failed to initialize address space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 					     epc->mem->window.page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	if (!ep->msi_mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	if (ep->ops->get_features) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		epc_features = ep->ops->get_features(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		if (epc_features->core_init_notifier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	return dw_pcie_ep_init_complete(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) EXPORT_SYMBOL_GPL(dw_pcie_ep_init);