Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags

pci_dn.c — all lines below come from a single commit: 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pci_dn.c
 *
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * PCI manipulation via device_nodes.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>

/*
 * This function is used to find the firmware data of one
 * specific PCI device, which is attached to the indicated
 * PCI bus. For VFs, their firmware data is linked to that
 * of the PF's bridge. For other devices, their firmware
 * data is linked to that of their bridge.
 */
static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus)
{
	struct pci_bus *pbus;
	struct device_node *dn;
	struct pci_dn *pdn;

	/*
	 * We probably have a virtual bus which doesn't
	 * have an associated bridge.
	 */
	pbus = bus;
	while (pbus) {
		if (pci_is_root_bus(pbus) || pbus->self)
			break;

		pbus = pbus->parent;
	}

	/*
	 * Except for virtual buses, all PCI buses should
	 * have device nodes.
	 */
	dn = pci_bus_to_OF_node(pbus);
	pdn = dn ? PCI_DN(dn) : NULL;

	return pdn;
}

struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
				    int devfn)
{
	struct device_node *dn = NULL;
	struct pci_dn *parent, *pdn;
	struct pci_dev *pdev = NULL;

	/* Fast path: fetch from PCI device */
	list_for_each_entry(pdev, &bus->devices, bus_list) {
		if (pdev->devfn == devfn) {
			if (pdev->dev.archdata.pci_data)
				return pdev->dev.archdata.pci_data;

			dn = pci_device_to_OF_node(pdev);
			break;
		}
	}

	/* Fast path: fetch from device node */
	pdn = dn ? PCI_DN(dn) : NULL;
	if (pdn)
		return pdn;

	/* Slow path: fetch from firmware data hierarchy */
	parent = pci_bus_to_pdn(bus);
	if (!parent)
		return NULL;

	list_for_each_entry(pdn, &parent->child_list, list) {
		if (pdn->busno == bus->number &&
		    pdn->devfn == devfn)
			return pdn;
	}

	return NULL;
}
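
/*
 * Illustrative example (hypothetical caller, not from the original
 * file): code that already knows a bus and a devfn can resolve the
 * firmware data with pci_get_pdn_by_devfn() and must handle a NULL
 * result, e.g.:
 *
 *	struct pci_dn *pdn = pci_get_pdn_by_devfn(bus, PCI_DEVFN(2, 0));
 *
 *	if (!pdn)
 *		return -ENODEV;
 *
 * PCI_DEVFN() is the standard helper from <linux/pci.h>; the bus
 * pointer and the error handling above are assumed for illustration.
 */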

struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
{
	struct device_node *dn;
	struct pci_dn *parent, *pdn;

	/* Search device directly */
	if (pdev->dev.archdata.pci_data)
		return pdev->dev.archdata.pci_data;

	/* Check device node */
	dn = pci_device_to_OF_node(pdev);
	pdn = dn ? PCI_DN(dn) : NULL;
	if (pdn)
		return pdn;

	/*
	 * VFs don't have device nodes. We hook their
	 * firmware data to the PF's bridge.
	 */
	parent = pci_bus_to_pdn(pdev->bus);
	if (!parent)
		return NULL;

	list_for_each_entry(pdn, &parent->child_list, list) {
		if (pdn->busno == pdev->bus->number &&
		    pdn->devfn == pdev->devfn)
			return pdn;
	}

	return NULL;
}
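
/*
 * Illustrative example (hypothetical caller): platform code usually
 * resolves a device's pci_dn once and then reads the firmware-assigned
 * fields from it, e.g.:
 *
 *	struct pci_dn *pdn = pci_get_pdn(pdev);
 *
 *	if (pdn && pdn->pe_number != IODA_INVALID_PE)
 *		pr_debug("PE#%x\n", pdn->pe_number);
 *
 * The pr_debug() use is assumed for illustration; pe_number and
 * IODA_INVALID_PE are the fields used elsewhere in this file.
 */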

#ifdef CONFIG_EEH
static struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
{
	struct eeh_dev *edev;

	/* Allocate EEH device */
	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return NULL;

	/* Associate EEH device with OF node */
	pdn->edev = edev;
	edev->pdn = pdn;
	edev->bdfn = (pdn->busno << 8) | pdn->devfn;
	edev->controller = pdn->phb;

	return edev;
}
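
/*
 * Worked example (illustrative): bdfn packs the bus number and devfn
 * the same way PCI config-space addressing does.  For busno 0x05 and
 * devfn 0x10 (device 2, function 0),
 * bdfn = (0x05 << 8) | 0x10 = 0x0510.
 */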
#endif /* CONFIG_EEH */

#ifdef CONFIG_PCI_IOV
static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent,
					   int busno, int devfn)
{
	struct pci_dn *pdn;

	/* Except for the PHB, we always have a parent */
	if (!parent)
		return NULL;

	pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
	if (!pdn)
		return NULL;

	pdn->phb = parent->phb;
	pdn->parent = parent;
	pdn->busno = busno;
	pdn->devfn = devfn;
	pdn->pe_number = IODA_INVALID_PE;
	INIT_LIST_HEAD(&pdn->child_list);
	INIT_LIST_HEAD(&pdn->list);
	list_add_tail(&pdn->list, &parent->child_list);

	return pdn;
}

struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev)
{
	struct pci_dn *parent, *pdn;
	int i;

	/* Only support IOV for now */
	if (WARN_ON(!pdev->is_physfn))
		return NULL;

	/* Check if VFs have been populated */
	pdn = pci_get_pdn(pdev);
	if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF))
		return NULL;

	pdn->flags |= PCI_DN_FLAG_IOV_VF;
	parent = pci_bus_to_pdn(pdev->bus);
	if (!parent)
		return NULL;

	for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
		struct eeh_dev *edev __maybe_unused;

		pdn = add_one_sriov_vf_pdn(parent,
					   pci_iov_virtfn_bus(pdev, i),
					   pci_iov_virtfn_devfn(pdev, i));
		if (!pdn) {
			dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n",
				 __func__, i);
			return NULL;
		}

#ifdef CONFIG_EEH
		/* Create the EEH device for the VF */
		edev = eeh_dev_init(pdn);
		BUG_ON(!edev);

		/* FIXME: these should probably be populated by the EEH probe */
		edev->physfn = pdev;
		edev->vf_index = i;
#endif /* CONFIG_EEH */
	}
	return pci_get_pdn(pdev);
}
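
/*
 * Illustrative note: the per-VF bus and devfn used above come from the
 * PCI core's SR-IOV helpers, which derive them from the PF's SR-IOV
 * capability (First VF Offset / VF Stride), e.g. for the first VF:
 *
 *	int vf_bus   = pci_iov_virtfn_bus(pdev, 0);
 *	int vf_devfn = pci_iov_virtfn_devfn(pdev, 0);
 *
 * The variable names here are hypothetical; the helpers are the same
 * ones used in the loop above.
 */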

void remove_sriov_vf_pdns(struct pci_dev *pdev)
{
	struct pci_dn *parent;
	struct pci_dn *pdn, *tmp;
	int i;

	/* Only support IOV PF for now */
	if (WARN_ON(!pdev->is_physfn))
		return;

	/* Check if VFs have been populated */
	pdn = pci_get_pdn(pdev);
	if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF))
		return;

	pdn->flags &= ~PCI_DN_FLAG_IOV_VF;
	parent = pci_bus_to_pdn(pdev->bus);
	if (!parent)
		return;

	/*
	 * We might introduce a flag to pci_dn in the future
	 * so that we can release the VFs' firmware data in
	 * batch mode.
	 */
	for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
		struct eeh_dev *edev __maybe_unused;

		list_for_each_entry_safe(pdn, tmp,
			&parent->child_list, list) {
			if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
			    pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
				continue;

#ifdef CONFIG_EEH
			/*
			 * Release EEH state for this VF. The PCI core
			 * has already torn down the pci_dev for this VF, but
			 * we're responsible for removing the eeh_dev since it
			 * has the same lifetime as the pci_dn that spawned it.
			 */
			edev = pdn_to_eeh_dev(pdn);
			if (edev) {
				/*
				 * We allocate pci_dn's for the totalvfs count,
				 * but only the VFs that were activated
				 * have a configured PE.
				 */
				if (edev->pe)
					eeh_pe_tree_remove(edev);

				pdn->edev = NULL;
				kfree(edev);
			}
#endif /* CONFIG_EEH */

			if (!list_empty(&pdn->list))
				list_del(&pdn->list);

			kfree(pdn);
		}
	}
}
#endif /* CONFIG_PCI_IOV */

struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
					struct device_node *dn)
{
	const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
	const __be32 *regs;
	struct device_node *parent;
	struct pci_dn *pdn;
#ifdef CONFIG_EEH
	struct eeh_dev *edev;
#endif

	pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
	if (pdn == NULL)
		return NULL;
	dn->data = pdn;
	pdn->phb = hose;
	pdn->pe_number = IODA_INVALID_PE;
	regs = of_get_property(dn, "reg", NULL);
	if (regs) {
		u32 addr = of_read_number(regs, 1);

		/* First register entry is addr (00BBSS00) */
		pdn->busno = (addr >> 16) & 0xff;
		pdn->devfn = (addr >> 8) & 0xff;
	}
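
	/*
	 * Worked example (illustrative): for a device at bus 0x05,
	 * device 2, function 0, the first "reg" cell is 0x00051000, so
	 * busno = (0x00051000 >> 16) & 0xff = 0x05 and
	 * devfn = (0x00051000 >> 8) & 0xff = 0x10.
	 */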

	/* vendor/device IDs and class code */
	regs = of_get_property(dn, "vendor-id", NULL);
	pdn->vendor_id = regs ? of_read_number(regs, 1) : 0;
	regs = of_get_property(dn, "device-id", NULL);
	pdn->device_id = regs ? of_read_number(regs, 1) : 0;
	regs = of_get_property(dn, "class-code", NULL);
	pdn->class_code = regs ? of_read_number(regs, 1) : 0;

	/* Extended config space */
	pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);

	/* Create EEH device */
#ifdef CONFIG_EEH
	edev = eeh_dev_init(pdn);
	if (!edev) {
		kfree(pdn);
		return NULL;
	}
#endif

	/* Attach to parent node */
	INIT_LIST_HEAD(&pdn->child_list);
	INIT_LIST_HEAD(&pdn->list);
	parent = of_get_parent(dn);
	pdn->parent = parent ? PCI_DN(parent) : NULL;
	if (pdn->parent)
		list_add_tail(&pdn->list, &pdn->parent->child_list);

	return pdn;
}
EXPORT_SYMBOL_GPL(pci_add_device_node_info);

void pci_remove_device_node_info(struct device_node *dn)
{
	struct pci_dn *pdn = dn ? PCI_DN(dn) : NULL;
	struct device_node *parent;
	struct pci_dev *pdev;
#ifdef CONFIG_EEH
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (edev)
		edev->pdn = NULL;
#endif

	if (!pdn)
		return;

	WARN_ON(!list_empty(&pdn->child_list));
	list_del(&pdn->list);

	/* Drop the parent pci_dn's ref to our backing dt node */
	parent = of_get_parent(dn);
	if (parent)
		of_node_put(parent);

	/*
	 * At this point we *might* still have a pci_dev that was
	 * instantiated from this pci_dn. So defer free()ing it until
	 * the pci_dev's release function is called.
	 */
	pdev = pci_get_domain_bus_and_slot(pdn->phb->global_number,
			pdn->busno, pdn->devfn);
	if (pdev) {
		/* NB: pdev has a ref to dn */
		pci_dbg(pdev, "marked pdn (from %pOF) as dead\n", dn);
		pdn->flags |= PCI_DN_FLAG_DEAD;
	} else {
		dn->data = NULL;
		kfree(pdn);
	}

	pci_dev_put(pdev);
}
EXPORT_SYMBOL_GPL(pci_remove_device_node_info);

/*
 * Traverse a device tree, stopping at each PCI device in the tree.
 * This is done depth first.  As each node is processed, a "pre"
 * function is called and the children are processed recursively.
 *
 * The "pre" func returns a value.  If non-zero is returned from
 * the "pre" func, the traversal stops and this value is returned.
 * This return value is useful when using traverse as a method of
 * finding a device.
 *
 * NOTE: we do not run the func for devices that do not appear to
 * be PCI, except for the start node, which we assume to be PCI
 * (this is useful because the start node is often a PHB which may
 * be missing PCI properties).
 * We use the class-code as an indicator. If we run into
 * one of these nodes we also assume its siblings are non-PCI for
 * performance.
 */
void *pci_traverse_device_nodes(struct device_node *start,
				void *(*fn)(struct device_node *, void *),
				void *data)
{
	struct device_node *dn, *nextdn;
	void *ret;

	/* We started with a PHB, iterate over all children */
	for (dn = start->child; dn; dn = nextdn) {
		const __be32 *classp;
		u32 class = 0;

		nextdn = NULL;
		classp = of_get_property(dn, "class-code", NULL);
		if (classp)
			class = of_read_number(classp, 1);

		if (fn) {
			ret = fn(dn, data);
			if (ret)
				return ret;
		}

		/* If we are a PCI bridge, go down */
		if (dn->child && ((class >> 8) == PCI_CLASS_BRIDGE_PCI ||
				  (class >> 8) == PCI_CLASS_BRIDGE_CARDBUS))
			/* Depth first...do children */
			nextdn = dn->child;
		else if (dn->sibling)
			/* OK, try the next sibling instead. */
			nextdn = dn->sibling;
		if (!nextdn) {
			/* Walk up to next valid sibling. */
			do {
				dn = dn->parent;
				if (dn == start)
					return NULL;
			} while (dn->sibling == NULL);
			nextdn = dn->sibling;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_traverse_device_nodes);
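
/*
 * Illustrative example (hypothetical callback): a traversal callback
 * returns NULL to keep walking and a non-NULL pointer to stop the walk
 * and have that pointer returned to the caller, e.g.:
 *
 *	static void *match_devfn(struct device_node *dn, void *data)
 *	{
 *		struct pci_dn *pdn = PCI_DN(dn);
 *		int devfn = *(int *)data;
 *
 *		return (pdn && pdn->devfn == devfn) ? pdn : NULL;
 *	}
 *
 * match_devfn() is hypothetical; add_pdn() below is the real callback
 * used by pci_devs_phb_init_dynamic().
 */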

static struct pci_dn *pci_dn_next_one(struct pci_dn *root,
				      struct pci_dn *pdn)
{
	struct list_head *next = pdn->child_list.next;

	if (next != &pdn->child_list)
		return list_entry(next, struct pci_dn, list);

	while (1) {
		if (pdn == root)
			return NULL;

		next = pdn->list.next;
		if (next != &pdn->parent->child_list)
			break;

		pdn = pdn->parent;
	}

	return list_entry(next, struct pci_dn, list);
}

void *traverse_pci_dn(struct pci_dn *root,
		      void *(*fn)(struct pci_dn *, void *),
		      void *data)
{
	struct pci_dn *pdn = root;
	void *ret;

	/* Only scan the child nodes */
	for (pdn = pci_dn_next_one(root, pdn); pdn;
	     pdn = pci_dn_next_one(root, pdn)) {
		ret = fn(pdn, data);
		if (ret)
			return ret;
	}

	return NULL;
}

static void *add_pdn(struct device_node *dn, void *data)
{
	struct pci_controller *hose = data;
	struct pci_dn *pdn;

	pdn = pci_add_device_node_info(hose, dn);
	if (!pdn)
		return ERR_PTR(-ENOMEM);

	return NULL;
}

/**
 * pci_devs_phb_init_dynamic - setup pci devices under this PHB
 * @phb: PCI-to-Host bridge (top-level bridge connecting to the CPU)
 *
 * This routine is called both during boot (before the memory
 * subsystem is set up, before kmalloc is valid) and during the
 * dynamic LPAR operation of adding a PHB to a running system.
 */
void pci_devs_phb_init_dynamic(struct pci_controller *phb)
{
	struct device_node *dn = phb->dn;
	struct pci_dn *pdn;

	/* PHB nodes themselves must not match */
	pdn = pci_add_device_node_info(phb, dn);
	if (pdn) {
		pdn->devfn = pdn->busno = -1;
		pdn->vendor_id = pdn->device_id = pdn->class_code = 0;
		pdn->phb = phb;
		phb->pci_data = pdn;
	}

	/* Update dn->phb ptrs for new phb and children devices */
	pci_traverse_device_nodes(dn, add_pdn, phb);
}

/**
 * pci_devs_phb_init - Initialize phbs and pci devs under them.
 *
 * This routine walks over all PHBs (PCI host bridges) on the
 * system, and sets up assorted pci-related structures
 * (including pci info in the device node structs) for each
 * pci device found underneath.  This routine runs once,
 * early in the boot sequence.
 */
static int __init pci_devs_phb_init(void)
{
	struct pci_controller *phb, *tmp;

	/* This must be done first so the device nodes have valid pci info! */
	list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
		pci_devs_phb_init_dynamic(phb);

	return 0;
}

core_initcall(pci_devs_phb_init);

static void pci_dev_pdn_setup(struct pci_dev *pdev)
{
	struct pci_dn *pdn;

	if (pdev->dev.archdata.pci_data)
		return;

	/* Setup the fast path */
	pdn = pci_get_pdn(pdev);
	pdev->dev.archdata.pci_data = pdn;
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);
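
/*
 * Illustrative note: DECLARE_PCI_FIXUP_EARLY() with PCI_ANY_ID for both
 * vendor and device registers pci_dev_pdn_setup() as an early quirk that
 * the PCI core runs for every device it enumerates, so the
 * dev.archdata.pci_data fast path is populated before drivers probe.
 */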
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	/* Setup the fast path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	pdn = pci_get_pdn(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	pdev->dev.archdata.pci_data = pdn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);