Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Contains common pci routines for ALL ppc platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * (based on pci_32.c and pci_64.c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Port for PPC64 David Engebretsen, IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *   Rework, based on alpha PCI code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * Common pmac/prep/chrp pci routines. -- Cort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/of_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/shmem_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/vgaarb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/numa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <asm/pci-bridge.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <asm/machdep.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <asm/ppc-pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <asm/eeh.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include "../../../drivers/pci/pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
/* hose_spinlock protects accesses to the phb_bitmap and to hose_list. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
#define MAX_PHBS 0x10000

/*
 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
 * Accesses to this bitmap should be protected by hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA Memory physical address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);


/* Platform DMA mapping operations, installed via set_pci_dma_ops(). */
static const struct dma_map_ops *pci_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
/* Record the DMA mapping operations to be used for PCI devices. */
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
/*
 * Pick a PHB (PCI domain) number for the controller described by @dn.
 *
 * This function should run under locking protection, specifically
 * hose_spinlock.
 */
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u32 prop_32;
	u64 prop;

	/*
	 * Try fixed PHB numbering first, by checking archs and reading
	 * the respective device-tree properties. Firstly, try powernv by
	 * reading "ibm,opal-phbid", only present in OPAL environment.
	 */
	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
	if (ret) {
		/* Otherwise derive the number from the node's second "reg" cell. */
		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		prop = prop_32;
	}

	/* Truncate the property into the valid PHB number range. */
	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	/* We need to be sure to not use the same PHB number twice. */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		return phb_id;

	/*
	 * If no usable device-tree property was found, or if the fixed
	 * numbering tried to add the same PHB number twice, fall back to
	 * dynamic PHB numbering: grab the first free bit in the bitmap.
	 */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);
	set_bit(phb_id, phb_bitmap);

	return phb_id;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
/*
 * Allocate and minimally initialise a pci_controller (PHB) for the
 * device-tree node @dev.  The controller is assigned a domain number
 * and linked onto hose_list under hose_spinlock.
 *
 * Returns the new controller, or NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	/* May run before slab is up, hence the "maybe_bootmem" allocator. */
	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = get_phb_number(dev);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only slab-backed PHBs may later be kfree()d; see pcibios_free_controller(). */
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Fall back to no node when the node id is invalid or offline. */
		if (nid < 0 || !node_online(nid))
			nid = NUMA_NO_NODE;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
/*
 * Tear down a controller created by pcibios_alloc_controller():
 * release its PHB number, unlink it from hose_list and, if it was
 * slab-allocated (is_dynamic), free the memory.
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	/* Bootmem-allocated controllers (is_dynamic == 0) are never freed. */
	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152)  * This function is used to call pcibios_free_controller()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153)  * in a deferred manner: a callback from the PCI subsystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155)  * _*DO NOT*_ call pcibios_free_controller() explicitly if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156)  * this is used (or it may access an invalid *phb pointer).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158)  * The callback occurs when all references to the root bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159)  * are dropped (e.g., child buses/devices and their users).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161)  * It's called as .release_fn() of 'struct pci_host_bridge'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162)  * which is associated with the 'struct pci_controller.bus'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163)  * (root bus) - it expects .release_data to hold a pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164)  * to 'struct pci_controller'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166)  * In order to use it, register .release_fn()/release_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167)  * like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169)  * pci_set_host_bridge_release(bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170)  *                             pcibios_free_controller_deferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  *                             (void *) phb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  * e.g. in the pcibios_root_bridge_prepare() callback from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * pci_create_root_bus().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	struct pci_controller *phb = (struct pci_controller *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 					 bridge->release_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	pcibios_free_controller(phb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188)  * The function is used to return the minimal alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)  * for memory or I/O windows of the associated P2P bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190)  * By default, 4KiB alignment for I/O windows and 1MiB for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191)  * memory windows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) resource_size_t pcibios_window_alignment(struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 					 unsigned long type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	struct pci_controller *phb = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	if (phb->controller_ops.window_alignment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 		return phb->controller_ops.window_alignment(bus, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	 * PCI core will figure out the default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	 * alignment: 4KiB for I/O and 1MiB for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	 * memory window.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	if (hose->controller_ops.setup_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		hose->controller_ops.setup_bridge(bus, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) void pcibios_reset_secondary_bus(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	if (phb->controller_ops.reset_secondary_bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 		phb->controller_ops.reset_secondary_bus(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	pci_reset_secondary_bus(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) resource_size_t pcibios_default_alignment(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	if (ppc_md.pcibios_default_alignment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		return ppc_md.pcibios_default_alignment();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) #ifdef CONFIG_PCI_IOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	if (ppc_md.pcibios_iov_resource_alignment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	return pci_iov_resource_size(pdev, resno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	if (ppc_md.pcibios_sriov_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 		return ppc_md.pcibios_sriov_enable(pdev, num_vfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) int pcibios_sriov_disable(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	if (ppc_md.pcibios_sriov_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 		return ppc_md.pcibios_sriov_disable(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) #endif /* CONFIG_PCI_IOV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) static resource_size_t pcibios_io_size(const struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	return hose->pci_io_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	return resource_size(&hose->io_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) int pcibios_vaddr_is_ioport(void __iomem *address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	struct pci_controller *hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	resource_size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	spin_lock(&hose_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	list_for_each_entry(hose, &hose_list, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		size = pcibios_io_size(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		if (address >= hose->io_base_virt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		    address < (hose->io_base_virt + size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	spin_unlock(&hose_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) unsigned long pci_address_to_pio(phys_addr_t address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	struct pci_controller *hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	resource_size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	unsigned long ret = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	spin_lock(&hose_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	list_for_each_entry(hose, &hose_list, list_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		size = pcibios_io_size(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		if (address >= hose->io_base_phys &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		    address < (hose->io_base_phys + size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 			unsigned long base =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 				(unsigned long)hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 			ret = base + (address - hose->io_base_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	spin_unlock(&hose_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) EXPORT_SYMBOL_GPL(pci_address_to_pio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316)  * Return the domain number for this bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) int pci_domain_nr(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	return hose->global_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) EXPORT_SYMBOL(pci_domain_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) /* This routine is meant to be used early during boot, when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327)  * PCI bus numbers have not yet been assigned, and you need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328)  * issue PCI config cycles to an OF device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329)  * It could also be used to "fix" RTAS config cycles if you want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330)  * to set pci_assign_all_buses to 1 and still use RTAS for PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331)  * config cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	while(node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 		struct pci_controller *hose, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 			if (hose->dn == node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 				return hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 		node = node->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) struct pci_controller *pci_find_controller_for_domain(int domain_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	struct pci_controller *hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	list_for_each_entry(hose, &hose_list, list_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		if (hose->global_number == domain_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 			return hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
/*
 * Reads the interrupt pin to determine if an interrupt is used by the
 * card.  If the interrupt is used, then gets the interrupt line from
 * the openfirmware and sets it in the pci_dev and pci_config line.
 *
 * Returns 0 on success, -1 when no usable interrupt could be mapped.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

	/* Try to get a mapping from the device-tree */
	virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
	if (virq <= 0) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		/* Pin 0: the function does not use an INTx interrupt. */
		if (pin == 0)
			return -1;
		/* Treat 0 and 0xff as "no line assigned" by the firmware. */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		/* Map the raw line number through the default irq domain. */
		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408)  * Platform support for /proc/bus/pci/X/Y mmap()s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  *  -- paulus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	resource_size_t ioaddr = pci_resource_start(pdev, bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	if (!hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	/* Convert to an offset within this PCI controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427)  * This one is used by /dev/mem and fbdev who have no clue about the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428)  * PCI device, it tries to find the PCI device first and calls the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429)  * above routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) pgprot_t pci_phys_mem_access_prot(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 				  unsigned long pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 				  unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 				  pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	struct pci_dev *pdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	struct resource *found = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	if (page_is_ram(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		return prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	prot = pgprot_noncached(prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	for_each_pci_dev(pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			struct resource *rp = &pdev->resource[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 			int flags = rp->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 			/* Active and same type? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			if ((flags & IORESOURCE_MEM) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 			/* In the range of this resource? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 			if (offset < (rp->start & PAGE_MASK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 			    offset > rp->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 			found = rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		if (found->flags & IORESOURCE_PREFETCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 			prot = pgprot_noncached_wc(prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		pci_dev_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		 (unsigned long long)offset, pgprot_val(prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	return prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) /* This provides legacy IO read access on a bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	struct resource *rp = &hose->io_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	/* Check if port can be supported by that bus. We only check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	 * the ranges of the PHB though, not the bus itself as the rules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	 * for forwarding legacy cycles down bridges are not our problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	 * here. So if the host bridge supports it, we do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	offset += port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	if (!(rp->flags & IORESOURCE_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (offset < rp->start || (offset + size) > rp->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	addr = hose->io_base_virt + port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	switch(size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		*((u8 *)val) = in_8(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		if (port & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		*((u16 *)val) = in_le16(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		if (port & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		*((u32 *)val) = in_le32(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) /* This provides legacy IO write access on a bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	struct resource *rp = &hose->io_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	/* Check if port can be supported by that bus. We only check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	 * the ranges of the PHB though, not the bus itself as the rules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	 * for forwarding legacy cycles down bridges are not our problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	 * here. So if the host bridge supports it, we do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	offset += port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (!(rp->flags & IORESOURCE_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	if (offset < rp->start || (offset + size) > rp->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	addr = hose->io_base_virt + port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	/* WARNING: The generic code is idiotic. It gets passed a pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	 * to what can be a 1, 2 or 4 byte quantity and always reads that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	 * as a u32, which means that we have to correct the location of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	 * the data read within those 32 bits for size 1 and 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	switch(size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		out_8(addr, val >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		if (port & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		out_le16(addr, val >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		if (port & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		out_le32(addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) /* This provides legacy IO or memory mmap access on a bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) int pci_mmap_legacy_page_range(struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			       struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			       enum pci_mmap_state mmap_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	resource_size_t offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	resource_size_t size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	struct resource *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		 pci_domain_nr(bus), bus->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		 (unsigned long long)offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		 (unsigned long long)(offset + size - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	if (mmap_state == pci_mmap_mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		/* Hack alert !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		 * Because X is lame and can fail starting if it gets an error trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		 * to mmap legacy_mem (instead of just moving on without legacy memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		 * access) we fake it here by giving it anonymous memory, effectively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		 * behaving just like /dev/zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		if ((offset + size) > hose->isa_mem_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 			printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 			if (vma->vm_flags & VM_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 				return shmem_zero_setup(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		offset += hose->isa_mem_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		unsigned long roffset = offset + io_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		rp = &hose->io_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		if (!(rp->flags & IORESOURCE_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 			return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		if (roffset < rp->start || (roffset + size) > rp->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 			return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		offset += hose->io_base_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	vma->vm_pgoff = offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 			       vma->vm_end - vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 			       vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) void pci_resource_to_user(const struct pci_dev *dev, int bar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			  const struct resource *rsrc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			  resource_size_t *start, resource_size_t *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	struct pci_bus_region region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (rsrc->flags & IORESOURCE_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		pcibios_resource_to_bus(dev->bus, &region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 					(struct resource *) rsrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		*start = region.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		*end = region.end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	/* We pass a CPU physical address to userland for MMIO instead of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	 * BAR value because X is lame and expects to be able to use that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	 * to pass to /dev/mem!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	 * That means we may have 64-bit values where some apps only expect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	 * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	*start = rsrc->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	*end = rsrc->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * @hose: newly allocated pci_controller to be setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  * @dev: device node of the host bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  * @primary: set if primary bus (32 bits only, soon to be deprecated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  * This function will parse the "ranges" property of a PCI host bridge device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  * node and setup the resource mapping of a pci controller based on its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  * content.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648)  * Life would be boring if it wasn't for a few issues that we have to deal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649)  * with here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651)  *   - We can only cope with one IO space range and up to 3 Memory space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652)  *     ranges. However, some machines (thanks Apple !) tend to split their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653)  *     space into lots of small contiguous ranges. So we have to coalesce.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  *   - Some busses have IO space not starting at 0, which causes trouble with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656)  *     the way we do our IO resource renumbering. The code somewhat deals with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657)  *     it for 64 bits but I would expect problems on 32 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  *   - Some 32 bits platforms such as 4xx can have physical space larger than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660)  *     32 bits so we need to use 64 bits values for the parsing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) void pci_process_bridge_OF_ranges(struct pci_controller *hose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 				  struct device_node *dev, int primary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	int memno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	struct of_pci_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	struct of_pci_range_parser parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	       dev, primary ? "(primary)" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	/* Check for ranges property */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	if (of_pci_range_parser_init(&parser, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	/* Parse it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	for_each_of_pci_range(&parser, &range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		/* If we failed translation or got a zero-sized region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		 * (some FW try to feed us with non sensical zero sized regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		 * such as power3 which look like some kind of attempt at exposing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		 * the VGA memory hole)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		/* Act based on address space type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		switch (range.flags & IORESOURCE_TYPE_BITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		case IORESOURCE_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			       range.cpu_addr, range.cpu_addr + range.size - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			       range.pci_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 			/* We support only one IO range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			if (hose->pci_io_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 				printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 				       " \\--> Skipped (too many) !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) #ifdef CONFIG_PPC32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			/* On 32 bits, limit I/O space to 16MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			if (range.size > 0x01000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 				range.size = 0x01000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			/* 32 bits needs to map IOs here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			hose->io_base_virt = ioremap(range.cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 						range.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			/* Expect trouble if pci_addr is not 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			if (primary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 				isa_io_base =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 					(unsigned long)hose->io_base_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) #endif /* CONFIG_PPC32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			/* pci_io_size and io_base_phys always represent IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			 * space starting at 0 so we factor in pci_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			hose->pci_io_size = range.pci_addr + range.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			hose->io_base_phys = range.cpu_addr - range.pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			/* Build resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			res = &hose->io_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			range.cpu_addr = range.pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		case IORESOURCE_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 			       range.cpu_addr, range.cpu_addr + range.size - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			       range.pci_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			       (range.flags & IORESOURCE_PREFETCH) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 			       "Prefetch" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			/* We support only 3 memory ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			if (memno >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 				printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 				       " \\--> Skipped (too many) !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			/* Handles ISA memory hole space here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			if (range.pci_addr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 				if (primary || isa_mem_base == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 					isa_mem_base = range.cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 				hose->isa_mem_phys = range.cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 				hose->isa_mem_size = range.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			/* Build resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 			hose->mem_offset[memno] = range.cpu_addr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 							range.pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			res = &hose->mem_resources[memno++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		if (res != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			res->name = dev->full_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			res->flags = range.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			res->start = range.cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			res->end = range.cpu_addr + range.size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			res->parent = res->child = res->sibling = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) /* Decide whether to display the domain number in /proc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) int pci_proc_domain(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		return hose->global_number != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (ppc_md.pcibios_root_bridge_prepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		return ppc_md.pcibios_root_bridge_prepare(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) /* This header fixup will do the resource fixup for all devices as they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * probed, but not for bridge ranges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static void pcibios_fixup_resources(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	if (!hose) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		       pci_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (dev->is_virtfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		struct resource *res = dev->resource + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		struct pci_bus_region reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		if (!res->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		/* If we're going to re-assign everything, we mark all resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		 * as unset (and 0-base them). In addition, we mark BARs starting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		 * since in that case, we don't want to re-assign anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		pcibios_resource_to_bus(dev->bus, &reg, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			/* Only print message if not re-assigning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 					 pci_name(dev), i, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			res->end -= res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			res->start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			res->flags |= IORESOURCE_UNSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	/* Call machine specific resource fixup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (ppc_md.pcibios_fixup_resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		ppc_md.pcibios_fixup_resources(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) /* This function tries to figure out if a bridge resource has been initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * by the firmware or not. It doesn't have to be absolutely bullet proof, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * things go more smoothly when it gets it right. It should covers cases such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 						 struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct pci_dev *dev = bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	resource_size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	struct pci_bus_region region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	u16 command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	/* We don't do anything if PCI_PROBE_ONLY is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (pci_has_flag(PCI_PROBE_ONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	/* Job is a bit different between memory and IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (res->flags & IORESOURCE_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		pcibios_resource_to_bus(dev->bus, &region, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		/* If the BAR is non-0 then it's probably been initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		if (region.start != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		/* The BAR is 0, let's check if memory decoding is enabled on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		 * the bridge. If not, we consider it unassigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		pci_read_config_word(dev, PCI_COMMAND, &command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		if ((command & PCI_COMMAND_MEMORY) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		/* Memory decoding is enabled and the BAR is 0. If any of the bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		 * resources covers that starting address (0 then it's good enough for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		 * us for memory space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			    hose->mem_resources[i].start == hose->mem_offset[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		/* Well, it starts at 0 and we know it will collide so we may as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		 * well consider it as unassigned. That covers the Apple case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		/* If the BAR is non-0, then we consider it assigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		if (((res->start - offset) & 0xfffffffful) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		/* Here, we are a bit different than memory as typically IO space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 * starting at low addresses -is- valid. What we do instead if that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 * we consider as unassigned anything that doesn't have IO enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		 * in the PCI command register, and that's it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		pci_read_config_word(dev, PCI_COMMAND, &command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (command & PCI_COMMAND_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		/* It's starting at 0 and IO is disabled in the bridge, consider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		 * it unassigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) /* Fixup resources of a PCI<->PCI bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) static void pcibios_fixup_bridge(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct pci_dev *dev = bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	pci_bus_for_each_resource(bus, res, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		if (!res || !res->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		if (i >= 3 && bus->self->transparent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		/* If we're going to reassign everything, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		 * shrink the P2P resource to have size as being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		 * of 0 in order to save space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			res->flags |= IORESOURCE_UNSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			res->start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			res->end = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		/* Try to detect uninitialized P2P bridge resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		 * and clear them out so they get re-assigned later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		if (pcibios_uninitialized_bridge_resource(bus, res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			res->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			pr_debug("PCI:%s            (unassigned)\n", pci_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) void pcibios_setup_bus_self(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct pci_controller *phb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	/* Fix up the bus resources for P2P bridges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	if (bus->self != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		pcibios_fixup_bridge(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	/* Platform specific bus fixups. This is currently only used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 * by fsl_pci and I'm hoping to get rid of it at some point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	if (ppc_md.pcibios_fixup_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		ppc_md.pcibios_fixup_bus(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	/* Setup bus DMA mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	phb = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (phb->controller_ops.dma_bus_setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		phb->controller_ops.dma_bus_setup(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) void pcibios_bus_add_device(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	struct pci_controller *phb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	/* Fixup NUMA node as it may not be setup yet by the generic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	 * code and is needed by the DMA init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	/* Hook up default DMA ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	set_dma_ops(&dev->dev, pci_dma_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	/* Additional platform DMA/iommu setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	phb = pci_bus_to_host(dev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (phb->controller_ops.dma_dev_setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		phb->controller_ops.dma_dev_setup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	/* Read default IRQs and fixup if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	pci_read_irq_line(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (ppc_md.pci_irq_fixup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		ppc_md.pci_irq_fixup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (ppc_md.pcibios_bus_add_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		ppc_md.pcibios_bus_add_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) int pcibios_add_device(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) #ifdef CONFIG_PCI_IOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	if (ppc_md.pcibios_fixup_sriov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		ppc_md.pcibios_fixup_sriov(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) #endif /* CONFIG_PCI_IOV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) void pcibios_set_master(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	/* No special bus mastering setup handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void pcibios_fixup_bus(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/* When called from the generic PCI probe, read PCI<->PCI bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * bases. This is -not- called when generating the PCI tree from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * the OF device-tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	pci_read_bridge_bases(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	/* Now fixup the bus bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	pcibios_setup_bus_self(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) EXPORT_SYMBOL(pcibios_fixup_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static int skip_isa_ioresource_align(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * We need to avoid collisions with `mirrored' VGA ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  * and other strange ISA hardware, so we always want the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * addresses to be allocated in the 0x000-0x0ff region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * modulo 0x400.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * Why? Because some silly external IO cards only decode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * the low 10 bits of the IO address. The 0x00-0xff region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  * is reserved for motherboard devices that decode all 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  * but we want to try to avoid allocating at 0x2900-0x2bff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * which might have be mirrored at 0x0100-0x03ff..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) resource_size_t pcibios_align_resource(void *data, const struct resource *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 				resource_size_t size, resource_size_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct pci_dev *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	resource_size_t start = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	if (res->flags & IORESOURCE_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (skip_isa_ioresource_align(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		if (start & 0x300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			start = (start + 0x3ff) & ~0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) EXPORT_SYMBOL(pcibios_align_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  * Reparent resource children of pr that conflict with res
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  * under res, and make res replace those children.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static int reparent_resources(struct resource *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				     struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct resource *p, **pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	struct resource **firstpp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		if (p->end < res->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		if (res->end < p->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		if (p->start < res->start || p->end > res->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			return -1;	/* not completely contained */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if (firstpp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			firstpp = pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (firstpp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		return -1;	/* didn't find any conflicting entries? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	res->parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	res->child = *firstpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	res->sibling = *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	*firstpp = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	*pp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	for (p = res->child; p != NULL; p = p->sibling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		p->parent = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		pr_debug("PCI: Reparented %s %pR under %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			 p->name, p, res->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)  *  Handle resources of PCI devices.  If the world were perfect, we could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)  *  just allocate all the resource regions and do nothing more.  It isn't.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  *  On the other hand, we cannot just re-allocate all devices, as it would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  *  require us to know lots of host bridge internals.  So we attempt to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  *  keep as much of the original configuration as possible, but tweak it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  *  when it's found to be wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  *  Known BIOS problems we have to work around:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  *	- I/O or memory regions not configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  *	- regions configured, but not enabled in the command register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  *	- bogus I/O addresses above 64K used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  *	- expansion ROMs left enabled (this may sound harmless, but given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  *	  the fact the PCI specs explicitly allow address decoders to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  *	  shared between expansion ROMs and other resource regions, it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  *	  at least dangerous)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  *  Our solution:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  *	    This gives us fixed barriers on where we can allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  *	(2) Allocate resources for all enabled devices.  If there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  *	    a collision, just mark the resource as unallocated. Also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  *	    disable expansion ROMs during this step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  *	(3) Try to allocate resources for disabled devices.  If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  *	    resources were assigned correctly, everything goes well,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  *	    if they weren't, they won't disturb allocation of other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  *	    resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  *	(4) Assign new addresses to resources which were either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  *	    not configured at all or misconfigured.  If explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  *	    requested by the user, configure expansion ROM address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  *	    as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static void pcibios_allocate_bus_resources(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	struct pci_bus *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	struct resource *res, *pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		 pci_domain_nr(bus), bus->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	pci_bus_for_each_resource(bus, res, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		if (!res || !res->flags || res->start > res->end || res->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		/* If the resource was left unset at this point, we clear it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		if (res->flags & IORESOURCE_UNSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			goto clear_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (bus->parent == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			pr = (res->flags & IORESOURCE_IO) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 				&ioport_resource : &iomem_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			pr = pci_find_parent_resource(bus->self, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			if (pr == res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 				/* this happens when the generic PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				 * code (wrongly) decides that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 				 * bridge is transparent  -- paulus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			 i, res, pr, (pr && pr->name) ? pr->name : "nil");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			struct pci_dev *dev = bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			if (request_resource(pr, res) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			 * Must be a conflict with an existing entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			 * Move that entry (or entries) under the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			 * bridge resource and try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			if (reparent_resources(pr, res) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			    pci_claim_bridge_resource(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 						i + PCI_BRIDGE_RESOURCES) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			i, bus->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	clear_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		/* The resource might be figured out when doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		 * reassignment based on the resources required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		 * by the downstream PCI devices. Here we set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		 * the size of the resource to be 0 in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		 * save more space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		res->start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		res->end = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		res->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	list_for_each_entry(b, &bus->children, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		pcibios_allocate_bus_resources(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static inline void alloc_resource(struct pci_dev *dev, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct resource *pr, *r = &dev->resource[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		 pci_name(dev), idx, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	pr = pci_find_parent_resource(dev, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	    request_resource(pr, r) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		       " of device %s, will remap\n", idx, pci_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		if (pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			pr_debug("PCI:  parent is %p: %pR\n", pr, pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		/* We'll assign a new address later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		r->flags |= IORESOURCE_UNSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		r->end -= r->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		r->start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static void __init pcibios_allocate_resources(int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	struct pci_dev *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	int idx, disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	u16 command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct resource *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	for_each_pci_dev(dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		pci_read_config_word(dev, PCI_COMMAND, &command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			r = &dev->resource[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			if (r->parent)		/* Already allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			if (!r->flags || (r->flags & IORESOURCE_UNSET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 				continue;	/* Not assigned at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			/* We only allocate ROMs on pass 1 just in case they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 			 * have been screwed up by firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			if (idx == PCI_ROM_RESOURCE )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 				disabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			if (r->flags & IORESOURCE_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 				disabled = !(command & PCI_COMMAND_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 				disabled = !(command & PCI_COMMAND_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			if (pass == disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 				alloc_resource(dev, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		if (pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		r = &dev->resource[PCI_ROM_RESOURCE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		if (r->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			/* Turn the ROM off, leave the resource region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			 * but keep it unregistered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			if (reg & PCI_ROM_ADDRESS_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 				pr_debug("PCI: Switching off ROM of %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 					 pci_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 				r->flags &= ~IORESOURCE_ROM_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				pci_write_config_dword(dev, dev->rom_base_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 						       reg & ~PCI_ROM_ADDRESS_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct pci_controller *hose = pci_bus_to_host(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	resource_size_t	offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	struct resource *res, *pres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	/* Check for IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	if (!(hose->io_resource.flags & IORESOURCE_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		goto no_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	BUG_ON(res == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	res->name = "Legacy IO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	res->flags = IORESOURCE_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	res->start = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	res->end = (offset + 0xfff) & 0xfffffffful;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	pr_debug("Candidate legacy IO: %pR\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (request_resource(&hose->io_resource, res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		       pci_domain_nr(bus), bus->number, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		kfree(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  no_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/* Check for memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		pres = &hose->mem_resources[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		offset = hose->mem_offset[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		if (!(pres->flags & IORESOURCE_MEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		pr_debug("hose mem res: %pR\n", pres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		if ((pres->start - offset) <= 0xa0000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		    (pres->end - offset) >= 0xbffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	if (i >= 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	BUG_ON(res == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	res->name = "Legacy VGA memory";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	res->flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	res->start = 0xa0000 + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	res->end = 0xbffff + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	pr_debug("Candidate VGA memory: %pR\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	if (request_resource(pres, res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		       pci_domain_nr(bus), bus->number, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		kfree(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) void __init pcibios_resource_survey(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	struct pci_bus *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	/* Allocate and assign resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	list_for_each_entry(b, &pci_root_buses, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		pcibios_allocate_bus_resources(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		pcibios_allocate_resources(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		pcibios_allocate_resources(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	/* Before we start assigning unassigned resource, we try to reserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	 * the low IO area and the VGA memory area if they intersect the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	 * bus available resources to avoid allocating things on top of them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		list_for_each_entry(b, &pci_root_buses, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			pcibios_reserve_legacy_regions(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	/* Now, if the platform didn't decide to blindly trust the firmware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	 * we proceed to assigning things that were left unassigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		pr_debug("PCI: Assigning unassigned resources...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		pci_assign_unassigned_resources();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /* This is used by the PCI hotplug driver to allocate resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  * of newly plugged busses. We can try to consolidate with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  * rest of the code later, for now, keep it as-is as our main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)  * resource allocation function doesn't deal with sub-trees yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) void pcibios_claim_one_bus(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	struct pci_bus *child_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	list_for_each_entry(dev, &bus->devices, bus_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			struct resource *r = &dev->resource[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			if (r->parent || !r->start || !r->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 				 pci_name(dev), i, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			if (pci_claim_resource(dev, i) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			pci_claim_bridge_resource(dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	list_for_each_entry(child_bus, &bus->children, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		pcibios_claim_one_bus(child_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* pcibios_finish_adding_to_bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  * This is to be called by the hotplug code after devices have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * added to a bus, this include calling it for a PHB that is just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  * being added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) void pcibios_finish_adding_to_bus(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		 pci_domain_nr(bus), bus->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	/* Allocate bus and devices resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	pcibios_allocate_bus_resources(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	pcibios_claim_one_bus(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		if (bus->self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			pci_assign_unassigned_bridge_resources(bus->self);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			pci_assign_unassigned_bus_resources(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	/* Add new devices to global lists.  Register in proc, sysfs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	pci_bus_add_devices(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) int pcibios_enable_device(struct pci_dev *dev, int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	if (phb->controller_ops.enable_device_hook)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		if (!phb->controller_ops.enable_device_hook(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	return pci_enable_resources(dev, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) void pcibios_disable_device(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	if (phb->controller_ops.disable_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		phb->controller_ops.disable_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	return (unsigned long) hose->io_base_virt - _IO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static void pcibios_setup_phb_resources(struct pci_controller *hose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 					struct list_head *resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	resource_size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	/* Hookup PHB IO resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	res = &hose->io_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if (!res->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		pr_debug("PCI: I/O resource not set for host"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			 " bridge %pOF (domain %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			 hose->dn, hose->global_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		offset = pcibios_io_space_offset(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		pr_debug("PCI: PHB IO resource    = %pR off 0x%08llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			 res, (unsigned long long)offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		pci_add_resource_offset(resources, res, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	/* Hookup PHB Memory resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	for (i = 0; i < 3; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		res = &hose->mem_resources[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		if (!res->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		offset = hose->mem_offset[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			 res, (unsigned long long)offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		pci_add_resource_offset(resources, res, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)  * Null PCI config access functions, for the case when we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  * find a hose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) #define NULL_PCI_OP(rw, size, type)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static int								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	return PCIBIOS_DEVICE_NOT_FOUND;    				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		 int len, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		  int len, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) static struct pci_ops null_pci_ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	.read = null_read_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	.write = null_write_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  * These functions are used early on before PCI scanning is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  * and all of the pci_dev and pci_bus structures have been created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static struct pci_bus *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) fake_pci_bus(struct pci_controller *hose, int busnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	static struct pci_bus bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (hose == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	bus.number = busnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	bus.sysdata = hose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	bus.ops = hose? hose->ops: &null_pci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	return &bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) #define EARLY_PCI_OP(rw, size, type)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			       int devfn, int offset, type value)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 					    devfn, offset, value);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) EARLY_PCI_OP(read, byte, u8 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) EARLY_PCI_OP(read, word, u16 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) EARLY_PCI_OP(read, dword, u32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) EARLY_PCI_OP(write, byte, u8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) EARLY_PCI_OP(write, word, u16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) EARLY_PCI_OP(write, dword, u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) int early_find_capability(struct pci_controller *hose, int bus, int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			  int cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	struct pci_controller *hose = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	return of_node_get(hose->dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  * @hose: Pointer to the PCI host controller instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) void pcibios_scan_phb(struct pci_controller *hose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	LIST_HEAD(resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	struct pci_bus *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct device_node *node = hose->dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	int mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	pr_debug("PCI: Scanning PHB %pOF\n", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	/* Get some IO space for the new PHB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	pcibios_setup_phb_io_space(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	/* Wire up PHB bus resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	pcibios_setup_phb_resources(hose, &resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	hose->busn.start = hose->first_busno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	hose->busn.end	 = hose->last_busno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	hose->busn.flags = IORESOURCE_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	pci_add_resource(&resources, &hose->busn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	/* Create an empty bus for the toplevel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	bus = pci_create_root_bus(hose->parent, hose->first_busno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 				  hose->ops, hose, &resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	if (bus == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		pr_err("Failed to create bus for PCI domain %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 			hose->global_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		pci_free_resource_list(&resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	hose->bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	/* Get probe mode and perform scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	mode = PCI_PROBE_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (node && hose->controller_ops.probe_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		mode = hose->controller_ops.probe_mode(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	pr_debug("    probe mode: %d\n", mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	if (mode == PCI_PROBE_DEVTREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		of_scan_bus(node, bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (mode == PCI_PROBE_NORMAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		pci_bus_update_busn_res_end(bus, 255);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		hose->last_busno = pci_scan_child_bus(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		pci_bus_update_busn_res_end(bus, hose->last_busno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	/* Platform gets a chance to do some global fixups before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	 * we proceed to resource allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	if (ppc_md.pcibios_fixup_phb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		ppc_md.pcibios_fixup_phb(hose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	/* Configure PCI Express settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		struct pci_bus *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		list_for_each_entry(child, &bus->children, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 			pcie_bus_configure_settings(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) EXPORT_SYMBOL_GPL(pcibios_scan_phb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	int i, class = dev->class >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	/* When configured as agent, programing interface = 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	int prog_if = dev->class & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	     class == PCI_CLASS_BRIDGE_OTHER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		(prog_if == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		(dev->bus->parent == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 			dev->resource[i].start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 			dev->resource[i].end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			dev->resource[i].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static int __init discover_phbs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	if (ppc_md.discover_phbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		ppc_md.discover_phbs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) core_initcall(discover_phbs);