// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements PE support based on the information from
 * the platforms. Basically, there are 3 types of PEs: PHB/Bus/Device.
 * All PEs are organized as a hierarchy tree. The first level of the
 * tree is associated with the existing PHBs since a particular PE is
 * only meaningful in one PHB domain.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

static int eeh_pe_aux_size;
static LIST_HEAD(eeh_phb_pe);

/**
 * eeh_set_pe_aux_size - Set PE auxiliary data size
 * @size: PE auxiliary data size
 *
 * Set PE auxiliary data size.
 */
void eeh_set_pe_aux_size(int size)
{
	if (size < 0)
		return;

	eeh_pe_aux_size = size;
}

/**
 * eeh_pe_alloc - Allocate PE
 * @phb: PCI controller
 * @type: PE type
 *
 * Allocate PE instance dynamically.
 */
static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
{
	struct eeh_pe *pe;
	size_t alloc_size;

	alloc_size = sizeof(struct eeh_pe);
	if (eeh_pe_aux_size) {
		alloc_size = ALIGN(alloc_size, cache_line_size());
		alloc_size += eeh_pe_aux_size;
	}

	/* Allocate PHB PE */
	pe = kzalloc(alloc_size, GFP_KERNEL);
	if (!pe)
		return NULL;

	/* Initialize PHB PE */
	pe->type = type;
	pe->phb = phb;
	INIT_LIST_HEAD(&pe->child_list);
	INIT_LIST_HEAD(&pe->edevs);

	pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
				      cache_line_size());
	return pe;
}
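
/*
 * Illustrative sketch (not part of the driver logic): a platform that
 * wants per-PE private data reserves it before any PE is allocated and
 * later reaches it through pe->data. The structure name below is made
 * up for the example; only eeh_set_pe_aux_size() and pe->data are real.
 *
 *	struct my_plat_pe_data {		// hypothetical
 *		u64 diag_buf[16];
 *	};
 *
 *	// during platform EEH init, before any PE is created
 *	eeh_set_pe_aux_size(sizeof(struct my_plat_pe_data));
 *
 *	// later, for any PE returned by eeh_pe_alloc()
 *	struct my_plat_pe_data *pdata = pe->data;
 *
 * The aux area starts at the first cache-line-aligned offset after
 * struct eeh_pe, which is exactly what eeh_pe_alloc() sets pe->data to.
 */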

/**
 * eeh_phb_pe_create - Create PHB PE
 * @phb: PCI controller
 *
 * The function should be called when the PHB is detected during
 * system boot or PCI hotplug in order to create the PHB PE.
 */
int eeh_phb_pe_create(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	/* Allocate PHB PE */
	pe = eeh_pe_alloc(phb, EEH_PE_PHB);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	/* Put it into the list */
	list_add_tail(&pe->child, &eeh_phb_pe);

	pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);

	return 0;
}

/**
 * eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
int eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case we have to wait for the interval
	 * indicated by the firmware. The maximal wait time of 5 minutes
	 * is taken from the original EEH implementation, which also
	 * defined the minimal wait time as 1 second.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = eeh_ops->get_state(pe, &mwait);

		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait < EEH_STATE_MIN_WAIT_TIME) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		msleep(min(mwait, max_wait));
		max_wait -= mwait;
	}
}
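
/*
 * Illustrative sketch of how a recovery path might use eeh_wait_state()
 * (the real callers live in eeh.c/eeh_driver.c; the flag test below is
 * only an example):
 *
 *	int state = eeh_wait_state(pe, EEH_STATE_MAX_WAIT_TIME);
 *
 *	if (state == EEH_STATE_NOT_SUPPORT)
 *		// timed out, or the platform cannot report the state
 *	else if (state & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
 *		// the PE is functional again
 */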

/**
 * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
 * @phb: PCI controller
 *
 * The overall PEs form a hierarchy tree. The first layer of the
 * hierarchy tree is composed of PHB PEs. The function is used
 * to retrieve the corresponding PHB PE according to the given PHB.
 */
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
{
	struct eeh_pe *pe;

	list_for_each_entry(pe, &eeh_phb_pe, child) {
		/*
		 * Actually, we needn't check the type since
		 * the PE for the PHB has been determined when
		 * it was created.
		 */
		if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
			return pe;
	}

	return NULL;
}

/**
 * eeh_pe_next - Retrieve the next PE in the tree
 * @pe: current PE
 * @root: root PE
 *
 * The function is used to retrieve the next PE in the
 * PE hierarchy tree.
 */
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
{
	struct list_head *next = pe->child_list.next;

	if (next == &pe->child_list) {
		while (1) {
			if (pe == root)
				return NULL;
			next = pe->child.next;
			if (next != &pe->parent->child_list)
				break;
			pe = pe->parent;
		}
	}

	return list_entry(next, struct eeh_pe, child);
}
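
/*
 * eeh_pe_next() implements a pre-order walk: descend to the first child
 * if there is one, otherwise move to the next sibling, backing up towards
 * @root while the current subtree is exhausted. The eeh_for_each_pe()
 * iterator used throughout this file is (roughly) built on top of it:
 *
 *	#define eeh_for_each_pe(root, pe) \
 *		for (pe = root; pe; pe = eeh_pe_next(pe, root))
 *
 * so a PHB PE is always visited before any of its bus/device PEs.
 */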

/**
 * eeh_pe_traverse - Traverse PEs in the specified PHB
 * @root: root PE
 * @fn: callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the specified PE and its
 * child PEs. The traversal terminates once the callback returns
 * a non-NULL value, or once there are no more PEs to traverse.
 */
void *eeh_pe_traverse(struct eeh_pe *root,
		      eeh_pe_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	void *ret;

	eeh_for_each_pe(root, pe) {
		ret = fn(pe, flag);
		if (ret)
			return ret;
	}

	return NULL;
}

/**
 * eeh_pe_dev_traverse - Traverse the devices from the PE
 * @root: EEH PE
 * @fn: function callback
 * @flag: extra parameter to callback
 *
 * The function is used to traverse the devices of the specified
 * PE and its child PEs.
 */
void eeh_pe_dev_traverse(struct eeh_pe *root,
			 eeh_edev_traverse_func fn, void *flag)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	if (!root) {
		pr_warn("%s: Invalid PE %p\n",
			__func__, root);
		return;
	}

	/* Traverse root PE */
	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			fn(edev, flag);
}

/**
 * __eeh_pe_get - Check the PE address
 * @pe: EEH PE
 * @flag: pointer to the target PE address
 *
 * For one particular PE, it can be identified by PE address
 * or traditional BDF address. BDF address is composed of
 * Bus/Device/Function number. The extra data referred by flag
 * indicates which type of address should be used.
 */
static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
{
	int *target_pe = flag;

	/* PHB PEs are special and should be ignored */
	if (pe->type & EEH_PE_PHB)
		return NULL;

	if (*target_pe == pe->addr)
		return pe;

	return NULL;
}

/**
 * eeh_pe_get - Search PE based on the given address
 * @phb: PCI controller
 * @pe_no: PE number
 *
 * Search the corresponding PE based on the specified address which
 * is included in the eeh device. The function is used to check if
 * the associated PE has been created against the PE address. It's
 * notable that the PE address has 2 formats: the traditional PE
 * address which is composed of the PCI bus/device/function number,
 * or the unified PE address.
 */
struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no)
{
	struct eeh_pe *root = eeh_phb_pe_get(phb);

	return eeh_pe_traverse(root, __eeh_pe_get, &pe_no);
}
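
/*
 * Illustrative sketch (the platform error paths do something similar
 * when the hardware reports a frozen PE number):
 *
 *	struct eeh_pe *pe = eeh_pe_get(hose, frozen_pe_no);
 *
 *	if (!pe)
 *		// no PE with that address exists yet under this PHB
 *
 * Internally this is just eeh_pe_traverse() over the PHB's subtree with
 * __eeh_pe_get() as the match callback, so the first PE whose pe->addr
 * equals @pe_no wins.
 */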

/**
 * eeh_pe_tree_insert - Add EEH device to parent PE
 * @edev: EEH device
 * @new_pe_parent: PE to create additional PEs under
 *
 * Add EEH device to the PE in edev->pe_config_addr. If a PE already
 * exists with that address then @edev is added to that PE. Otherwise
 * a new PE is created and inserted into the PE tree as a child of
 * @new_pe_parent.
 *
 * If @new_pe_parent is NULL then the new PE will be inserted directly
 * under the PHB.
 */
int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
{
	struct pci_controller *hose = edev->controller;
	struct eeh_pe *pe, *parent;

	/*
	 * Search for an existing PE according to the PE address. If
	 * one exists, the PE should be composed of the PCI bus and
	 * its subordinate components.
	 */
	pe = eeh_pe_get(hose, edev->pe_config_addr);
	if (pe) {
		if (pe->type & EEH_PE_INVALID) {
			list_add_tail(&edev->entry, &pe->edevs);
			edev->pe = pe;
			/*
			 * We get here because of PCI hotplug caused by
			 * EEH recovery. We need to clear EEH_PE_INVALID
			 * all the way up to the top of the tree.
			 */
			parent = pe;
			while (parent) {
				if (!(parent->type & EEH_PE_INVALID))
					break;
				parent->type &= ~EEH_PE_INVALID;
				parent = parent->parent;
			}

			eeh_edev_dbg(edev, "Added to existing PE (parent: PE#%x)\n",
				     pe->parent->addr);
		} else {
			/* Mark the PE as type of PCI bus */
			pe->type = EEH_PE_BUS;
			edev->pe = pe;

			/* Put the edev to PE */
			list_add_tail(&edev->entry, &pe->edevs);
			eeh_edev_dbg(edev, "Added to bus PE\n");
		}
		return 0;
	}

	/* Create a new EEH PE */
	if (edev->physfn)
		pe = eeh_pe_alloc(hose, EEH_PE_VF);
	else
		pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
	if (!pe) {
		pr_err("%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	pe->addr = edev->pe_config_addr;

	/*
	 * Put the new EEH PE into the hierarchy tree. If the parent
	 * can't be found, the newly created PE will be attached to
	 * the PHB directly. Otherwise, we have to associate the PE
	 * with its parent.
	 */
	if (!new_pe_parent) {
		new_pe_parent = eeh_phb_pe_get(hose);
		if (!new_pe_parent) {
			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
			       __func__, hose->global_number);
			edev->pe = NULL;
			kfree(pe);
			return -EEXIST;
		}
	}

	/* link new PE into the tree */
	pe->parent = new_pe_parent;
	list_add_tail(&pe->child, &new_pe_parent->child_list);

	/*
	 * Put the newly created PE into the child list and
	 * link the EEH device accordingly.
	 */
	list_add_tail(&edev->entry, &pe->edevs);
	edev->pe = pe;
	eeh_edev_dbg(edev, "Added to new PE (parent: PE#%x)\n",
		     new_pe_parent->addr);

	return 0;
}
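
/*
 * Illustrative sketch of the probe-time flow (the real callers are the
 * pseries/powernv probe paths; the surrounding steps are paraphrased):
 *
 *	edev->pe_config_addr = <address obtained from firmware>;
 *
 *	// NULL parent: fall back to the PHB PE if no existing PE matches
 *	ret = eeh_pe_tree_insert(edev, NULL);
 *
 * After this returns 0, edev->pe points either at a pre-existing PE
 * (now also holding @edev) or at a freshly allocated EEH_PE_DEVICE /
 * EEH_PE_VF node linked under the PHB PE.
 */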

/**
 * eeh_pe_tree_remove - Remove one EEH device from the associated PE
 * @edev: EEH device
 *
 * The PE hierarchy tree might be changed when doing PCI hotplug.
 * Also, the PCI devices or buses could be removed from the system
 * during EEH recovery. So we have to call the function to remove the
 * corresponding PE accordingly if necessary.
 */
int eeh_pe_tree_remove(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent, *child;
	bool keep, recover;
	int cnt;

	pe = eeh_dev_to_pe(edev);
	if (!pe) {
		eeh_edev_dbg(edev, "No PE found for device.\n");
		return -EEXIST;
	}

	/* Remove the EEH device */
	edev->pe = NULL;
	list_del(&edev->entry);

	/*
	 * Check if the PE includes any EEH devices.
	 * If not, we should delete it. Also, we should
	 * delete the parent PE if it doesn't have associated
	 * child PEs and EEH devices.
	 */
	while (1) {
		parent = pe->parent;

		/* PHB PEs should never be removed */
		if (pe->type & EEH_PE_PHB)
			break;

		/*
		 * XXX: KEEP is set while resetting a PE. I don't think it's
		 *      ever set without RECOVERING also being set. I could
		 *      be wrong though so catch that with a WARN.
		 */
		keep = !!(pe->state & EEH_PE_KEEP);
		recover = !!(pe->state & EEH_PE_RECOVERING);
		WARN_ON(keep && !recover);

		if (!keep && !recover) {
			if (list_empty(&pe->edevs) &&
			    list_empty(&pe->child_list)) {
				list_del(&pe->child);
				kfree(pe);
			} else {
				break;
			}
		} else {
			/*
			 * Mark the PE as invalid. At the end of the recovery
			 * process any invalid PEs will be garbage collected.
			 *
			 * We need to delay the free()ing of them since we can
			 * remove edevs while traversing the PE tree which
			 * might trigger the removal of a PE and we can't
			 * deal with that (yet).
			 */
			if (list_empty(&pe->edevs)) {
				cnt = 0;
				list_for_each_entry(child, &pe->child_list, child) {
					if (!(child->type & EEH_PE_INVALID)) {
						cnt++;
						break;
					}
				}

				if (!cnt)
					pe->type |= EEH_PE_INVALID;
				else
					break;
			}
		}

		pe = parent;
	}

	return 0;
}

/**
 * eeh_pe_update_time_stamp - Update PE's frozen time stamp
 * @pe: EEH PE
 *
 * We keep a time stamp for each PE to trace how often it has been
 * frozen within the last hour. The function should be called on the
 * first error of a specific PE to update the time stamp; errors that
 * happened more than an hour ago no longer need to be accounted for.
 */
void eeh_pe_update_time_stamp(struct eeh_pe *pe)
{
	time64_t tstamp;

	if (!pe)
		return;

	if (pe->freeze_count <= 0) {
		pe->freeze_count = 0;
		pe->tstamp = ktime_get_seconds();
	} else {
		tstamp = ktime_get_seconds();
		if (tstamp - pe->tstamp > 3600) {
			pe->tstamp = tstamp;
			pe->freeze_count = 0;
		}
	}
}

/**
 * eeh_pe_state_mark - Mark specified state for PE and its associated device
 * @root: EEH PE
 * @state: EEH state to be set
 *
 * An EEH error affects the current PE and its child PEs. The function
 * is used to mark the appropriate state for the affected PEs and the
 * associated devices.
 */
void eeh_pe_state_mark(struct eeh_pe *root, int state)
{
	struct eeh_pe *pe;

	eeh_for_each_pe(root, pe)
		if (!(pe->state & EEH_PE_REMOVED))
			pe->state |= state;
}
EXPORT_SYMBOL_GPL(eeh_pe_state_mark);

/**
 * eeh_pe_mark_isolated - Mark the PE and its children as isolated
 * @root: EEH PE
 *
 * Record that a PE has been isolated by marking the PE and its children as
 * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
 * as pci_channel_io_frozen.
 */
void eeh_pe_mark_isolated(struct eeh_pe *root)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	eeh_pe_state_mark(root, EEH_PE_ISOLATED);
	eeh_for_each_pe(root, pe) {
		list_for_each_entry(edev, &pe->edevs, entry) {
			pdev = eeh_dev_to_pci_dev(edev);
			if (pdev)
				pdev->error_state = pci_channel_io_frozen;
		}
		/* Block PCI config access if required */
		if (pe->state & EEH_PE_CFG_RESTRICTED)
			pe->state |= EEH_PE_CFG_BLOCKED;
	}
}
EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);
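
/*
 * Illustrative sketch of how isolation marking pairs with the clearing
 * done by eeh_pe_state_clear() below (the real sequencing lives in the
 * recovery code in eeh_driver.c; this only shows the intent):
 *
 *	// error detected: freeze the subtree before driver callbacks
 *	eeh_pe_mark_isolated(pe);
 *
 *	// ... reset and recovery work ...
 *
 *	// recovery done: lift isolation, reset the check counters, and
 *	// put the PCI devices back to pci_channel_io_normal
 *	eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
 */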

static void __eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
{
	int mode = *((int *)flag);

	edev->mode |= mode;
}

/**
 * eeh_pe_dev_mode_mark - Mark mode for all devices under the PE
 * @pe: EEH PE
 * @mode: EEH device mode to be set
 *
 * Mark the specified mode for all devices of the PE and its child PEs.
 */
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
{
	eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
}

/**
 * eeh_pe_state_clear - Clear state for the PE
 * @root: EEH PE
 * @state: state to be cleared
 * @include_passed: include passed-through devices?
 *
 * The function is used to clear the indicated state from the
 * given PE. Besides, we also clear the check count of the PE
 * as well.
 */
void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	eeh_for_each_pe(root, pe) {
		/* Keep the state of permanently removed PE intact */
		if (pe->state & EEH_PE_REMOVED)
			continue;

		if (!include_passed && eeh_pe_passed(pe))
			continue;

		pe->state &= ~state;

		/*
		 * Special treatment on clearing isolated state. Clear
		 * the check count since the last isolation and put all
		 * affected devices back to the normal state.
		 */
		if (!(state & EEH_PE_ISOLATED))
			continue;

		pe->check_count = 0;
		eeh_pe_for_each_dev(pe, edev, tmp) {
			pdev = eeh_dev_to_pci_dev(edev);
			if (!pdev)
				continue;

			pdev->error_state = pci_channel_io_normal;
		}

		/* Unblock PCI config access if required */
		if (pe->state & EEH_PE_CFG_RESTRICTED)
			pe->state &= ~EEH_PE_CFG_BLOCKED;
	}
}

/*
 * Some PCI bridges (e.g. PLX bridges) have primary/secondary
 * buses assigned explicitly by firmware, and we probably have
 * lost that after reset. So we have to delay the check until
 * the PCI-CFG registers have been restored for the parent
 * bridge.
 *
 * Don't use the normal PCI-CFG accessors, which have probably
 * been blocked on the normal path during this stage. Instead,
 * use the EEH operations, which are always permitted.
 */
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
	int cap;
	uint32_t val;
	int timeout = 0;

	/*
	 * We only check root ports and downstream ports of
	 * PCIe switches
	 */
	if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
		return;

	eeh_edev_dbg(edev, "Checking PCIe link...\n");

	/* Check slot status */
	cap = edev->pcie_cap;
	eeh_ops->read_config(edev, cap + PCI_EXP_SLTSTA, 2, &val);
	if (!(val & PCI_EXP_SLTSTA_PDS)) {
		eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val);
		return;
	}

	/* Check power status if we have the capability */
	eeh_ops->read_config(edev, cap + PCI_EXP_SLTCAP, 2, &val);
	if (val & PCI_EXP_SLTCAP_PCP) {
		eeh_ops->read_config(edev, cap + PCI_EXP_SLTCTL, 2, &val);
		if (val & PCI_EXP_SLTCTL_PCC) {
			eeh_edev_dbg(edev, "In power-off state, power it on ...\n");
			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
			eeh_ops->write_config(edev, cap + PCI_EXP_SLTCTL, 2, val);
			msleep(2 * 1000);
		}
	}

	/* Enable link */
	eeh_ops->read_config(edev, cap + PCI_EXP_LNKCTL, 2, &val);
	val &= ~PCI_EXP_LNKCTL_LD;
	eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);

	/* Check link */
	eeh_ops->read_config(edev, cap + PCI_EXP_LNKCAP, 4, &val);
	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
		eeh_edev_dbg(edev, "No link reporting capability (0x%08x)\n", val);
		msleep(1000);
		return;
	}

	/* Wait for the link to come up, until timeout (5s) */
	timeout = 0;
	while (timeout < 5000) {
		msleep(20);
		timeout += 20;

		eeh_ops->read_config(edev, cap + PCI_EXP_LNKSTA, 2, &val);
		if (val & PCI_EXP_LNKSTA_DLLLA)
			break;
	}

	if (val & PCI_EXP_LNKSTA_DLLLA)
		eeh_edev_dbg(edev, "Link up (%s)\n",
			     (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
	else
		eeh_edev_dbg(edev, "Link not ready (0x%04x)\n", val);
}

#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
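
/*
 * Worked example of the BYTE_SWAP() arithmetic: edev->config_space[]
 * holds the saved config space as an array of 32-bit dwords, so the
 * byte at config offset OFF = 4*i + r sits in dword i, and BYTE_SWAP()
 * maps it to array byte 4*i + (3 - r), i.e. the bytes within each
 * dword are mirrored (compensating for the order in which the dwords
 * were saved). For instance:
 *
 *	BYTE_SWAP(PCI_CACHE_LINE_SIZE)	// 0x0C -> 8*3 + 3 - 12 = 15
 *	BYTE_SWAP(PCI_LATENCY_TIMER)	// 0x0D -> 8*3 + 3 - 13 = 14
 *
 * SAVED_BYTE() then simply indexes the saved area as a u8 array with
 * that mirrored offset, which is what the restore helpers below use
 * for the single-byte registers.
 */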

static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
	int i;

	/*
	 * Device BARs: 0x10 - 0x18
	 * Bus numbers and windows: 0x18 - 0x30
	 */
	for (i = 4; i < 13; i++)
		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
	/* Rom: 0x38 */
	eeh_ops->write_config(edev, 14*4, 4, edev->config_space[14]);

	/* Cache line & Latency timer: 0xC 0xD */
	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));
	/* Max latency, min grant, interrupt pin and line: 0x3C */
	eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);

	/* PCI Command: 0x4 */
	eeh_ops->write_config(edev, PCI_COMMAND, 4, edev->config_space[1] |
			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Check the PCIe link is ready */
	eeh_bridge_check_link(edev);
}

static void eeh_restore_device_bars(struct eeh_dev *edev)
{
	int i;
	u32 cmd;

	for (i = 4; i < 10; i++)
		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
	/* 12 == Expansion ROM Address */
	eeh_ops->write_config(edev, 12*4, 4, edev->config_space[12]);

	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
			      SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
			      SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);

	/*
	 * Restore PERR & SERR bits, some devices require it,
	 * don't touch the other command bits
	 */
	eeh_ops->read_config(edev, PCI_COMMAND, 4, &cmd);
	if (edev->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (edev->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	eeh_ops->write_config(edev, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @edev: EEH device
 * @flag: Unused
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
 * from the saved values in the device node.
 */
static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
{
	/* Do special restore for bridges */
	if (edev->mode & EEH_DEV_BRIDGE)
		eeh_restore_bridge_bars(edev);
	else
		eeh_restore_device_bars(edev);

	if (eeh_ops->restore_config)
		eeh_ops->restore_config(edev);
}

/**
 * eeh_pe_restore_bars - Restore the PCI config space info
 * @pe: EEH PE
 *
 * This routine performs a recursive walk to the children
 * of this device as well.
 */
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
	/*
	 * We needn't take the EEH lock since eeh_pe_dev_traverse()
	 * will take that.
	 */
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}

/**
 * eeh_pe_loc_get - Retrieve location code binding to the given PE
 * @pe: EEH PE
 *
 * Retrieve the location code of the given PE. If the primary PE bus
 * is the root bus, we will grab the location code from the PHB device
 * tree node or the root port. Otherwise, the device tree node of the
 * upstream bridge of the primary PE bus will be checked for the
 * location code.
 */
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
	struct pci_bus *bus = eeh_pe_bus_get(pe);
	struct device_node *dn;
	const char *loc = NULL;

	while (bus) {
		dn = pci_bus_to_OF_node(bus);
		if (!dn) {
			bus = bus->parent;
			continue;
		}

		if (pci_is_root_bus(bus))
			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
		else
			loc = of_get_property(dn, "ibm,slot-location-code",
					      NULL);

		if (loc)
			return loc;

		bus = bus->parent;
	}

	return "N/A";
}

/**
 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
 * @pe: EEH PE
 *
 * Retrieve the PCI bus according to the given PE. Basically,
 * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
 * primary PCI bus will be retrieved. The parent bus will be
 * returned for a BUS PE. However, we don't have an associated
 * PCI bus for a DEVICE PE.
 */
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
{
	struct eeh_dev *edev;
	struct pci_dev *pdev;

	if (pe->type & EEH_PE_PHB)
		return pe->phb->bus;

	/* The primary bus might be cached during probe time */
	if (pe->state & EEH_PE_PRI_BUS)
		return pe->bus;

	/* Retrieve the parent PCI bus of the first (top) PCI device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
	pdev = eeh_dev_to_pci_dev(edev);
	if (pdev)
		return pdev->bus;

	return NULL;
}