^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * PCI Express PCI Hot Plug Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1995,2001 Compaq Computer Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2001 IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 2003-2004 Intel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #define dev_fmt(fmt) "pciehp: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/dmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/jiffies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "pciehp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
/* DMI quirk list: platforms where in-band presence detect must be treated as disabled */
static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
	/*
	 * Match all Dell systems, as some Dell systems have inband
	 * presence disabled on NVMe slots (but don't support the bit to
	 * report it). Setting inband presence disabled should have no
	 * negative effect, except on broken hotplug slots that never
	 * assert presence detect--and those will still work, they will
	 * just have a bit of extra delay before being probed.
	 */
	{
		.ident = "Dell System",
		.matches = {
			DMI_MATCH(DMI_OEM_STRING, "Dell System"),
		},
	},
	{}	/* terminating entry */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
/* Return the PCIe port device hosting the hotplug slot */
static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
	return ctrl->pcie->port;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) static irqreturn_t pciehp_isr(int irq, void *dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) static irqreturn_t pciehp_ist(int irq, void *dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static int pciehp_poll(void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) static inline int pciehp_request_irq(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) int retval, irq = ctrl->pcie->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) if (pciehp_poll_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) "pciehp_poll-%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) slot_name(ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) return PTR_ERR_OR_ZERO(ctrl->poll_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) /* Installs the interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) IRQF_SHARED, "pciehp", ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) static inline void pciehp_free_irq(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (pciehp_poll_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) kthread_stop(ctrl->poll_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) free_irq(ctrl->pcie->irq, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) static int pcie_poll_cmd(struct controller *ctrl, int timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) u16 slot_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (slot_status == (u16) ~0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) ctrl_info(ctrl, "%s: no response from device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (slot_status & PCI_EXP_SLTSTA_CC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) PCI_EXP_SLTSTA_CC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) ctrl->cmd_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) timeout -= 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) } while (timeout >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) return 0; /* timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
/*
 * pcie_wait_cmd - wait for the previously issued Slot Control write to
 * complete before a new command is written.
 *
 * Waits on ctrl->queue for the Command Completed interrupt when both
 * the hotplug and command-completed interrupts are enabled, otherwise
 * falls back to polling the Slot Status register.  No-op if the
 * controller never signals command completion (NO_CMD_CMPL) or no
 * command is outstanding.
 */
static void pcie_wait_cmd(struct controller *ctrl)
{
	/*
	 * Interrupt mode waits up to 1 s; poll mode gets a larger 2.5 s
	 * budget — presumably headroom for the coarse polling granularity
	 * (NOTE(review): rationale not stated here, confirm upstream).
	 */
	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
	unsigned long duration = msecs_to_jiffies(msecs);
	unsigned long cmd_timeout = ctrl->cmd_started + duration;
	unsigned long now, timeout;
	int rc;

	/*
	 * If the controller does not generate notifications for command
	 * completions, we never need to wait between writes.
	 */
	if (NO_CMD_CMPL(ctrl))
		return;

	if (!ctrl->cmd_busy)
		return;

	/*
	 * Even if the command has already timed out, we want to call
	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
	 */
	now = jiffies;
	if (time_before_eq(cmd_timeout, now))
		timeout = 1;
	else
		timeout = cmd_timeout - now;

	/* Only sleep on the waitqueue if the completion interrupt can fire */
	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	else
		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));

	if (!rc)
		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
			  ctrl->slot_ctrl,
			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
/*
 * Slot Control bits driving the "Control" signals (power controller,
 * power/attention indicators, electromechanical interlock).  Used by
 * pcie_do_write_cmd() to detect writes that controllers with
 * command-completed errata actually acknowledge.
 */
#define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC |	\
			 PCI_EXP_SLTCTL_PIC |	\
			 PCI_EXP_SLTCTL_AIC |	\
			 PCI_EXP_SLTCTL_EIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
/*
 * pcie_do_write_cmd - read-modify-write the Slot Control register
 * @ctrl: controller whose Slot Control register is written
 * @cmd: new values for the bits selected by @mask
 * @mask: bits of Slot Control to modify
 * @wait: whether to wait for the command to complete before returning
 *
 * Serialized by ctrl->ctrl_lock.  Always waits for any previously
 * issued command first, so at most one command is in flight per
 * controller.  Bails out silently (command dropped) if the register
 * reads as all-ones, i.e. the device no longer responds.
 */
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
			      u16 mask, bool wait)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl_orig, slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	/*
	 * Always wait for any previous command that might still be in progress
	 */
	pcie_wait_cmd(ctrl);

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	if (slot_ctrl == (u16) ~0) {
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		goto out;
	}

	/* Merge @cmd into the current value, touching only @mask bits */
	slot_ctrl_orig = slot_ctrl;
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	/* make cmd_busy=1 visible before the write that triggers completion */
	smp_mb();
	/* cache the value for pcie_wait_cmd()/IRQ handlers */
	ctrl->slot_ctrl = slot_ctrl;
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
	ctrl->cmd_started = jiffies;	/* timestamp for the timeout message */

	/*
	 * Controllers with the Intel CF118 and similar errata advertise
	 * Command Completed support, but they only set Command Completed
	 * if we change the "Control" bits for power, power indicator,
	 * attention indicator, or interlock. If we only change the
	 * "Enable" bits, they never set the Command Completed bit.
	 */
	if (pdev->broken_cmd_compl &&
	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
		ctrl->cmd_busy = 0;

	/*
	 * Optionally wait for the hardware to be ready for a new command,
	 * indicating completion of the above issued command.
	 */
	if (wait)
		pcie_wait_cmd(ctrl);

out:
	mutex_unlock(&ctrl->ctrl_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
/**
 * pcie_write_cmd - Issue controller command
 * @ctrl: controller to which the command is issued
 * @cmd: command value written to slot control register
 * @mask: bitmask of slot control register to be modified
 *
 * Waits for the hardware to report command completion before returning.
 */
static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, true);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
/*
 * Same as pcie_write_cmd() but returns without waiting for the hardware
 * to latch the new Slot Control value.
 */
static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, false);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * pciehp_check_link_active() - Is the link active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * @ctrl: PCIe hotplug controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * Check whether the downstream link is currently active. Note it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * possible that the card is removed immediately after this so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * caller may need to take it into account.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * If the hotplug controller itself is not available anymore returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * %-ENODEV.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) int pciehp_check_link_active(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) u16 lnk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) u32 l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) int delay = 1000, step = 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) msleep(step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) delay -= step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) } while (delay > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (count > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) PCI_FUNC(devfn), count, step, l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static void pcie_wait_for_presence(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) int timeout = 1250;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) u16 slot_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) if (slot_status & PCI_EXP_SLTSTA_PDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) timeout -= 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) } while (timeout > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
/*
 * pciehp_check_link_status - bring-up check after slot power-on
 * @ctrl: PCIe hotplug controller
 *
 * Waits for the link to come up, optionally waits for in-band presence
 * (quirked platforms), and probes for a device at devfn 00.0 on the
 * secondary bus.  Returns 0 if the link trained and a device responded,
 * -1 otherwise.
 */
int pciehp_check_link_status(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	bool found;
	u16 lnk_status;

	if (!pcie_wait_for_link(pdev, true)) {
		ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
		return -1;
	}

	/* Quirked platforms: PDS may lag the link, so wait for it explicitly */
	if (ctrl->inband_presence_disabled)
		pcie_wait_for_presence(pdev);

	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
				  PCI_DEVFN(0, 0));

	/* ignore link or presence changes up to this point */
	if (found)
		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
			   &ctrl->pending_events);

	/* Link must be fully trained and have a nonzero negotiated width */
	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
		ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
			  slot_name(ctrl), lnk_status);
		return -1;
	}

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);

	if (!found) {
		ctrl_info(ctrl, "Slot(%s): No device found\n",
			  slot_name(ctrl));
		return -1;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) static int __pciehp_link_set(struct controller *ctrl, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) u16 lnk_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) lnk_ctrl |= PCI_EXP_LNKCTL_LD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
/* Enable the downstream link by clearing Link Disable; returns 0 */
static int pciehp_link_enable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, true);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
/*
 * pciehp_get_raw_indicator_status - raw attention/power indicator fields
 * @hotplug_slot: slot to query
 * @status: output; AIC and PIC fields shifted down to the low bits
 *
 * Reads Slot Control with the port runtime-resumed.  The >> 6 moves the
 * combined indicator control fields (which start at bit 6 of Slot
 * Control) to the bottom of *@status.  Always returns 0.
 */
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 *status)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	pci_config_pm_runtime_get(pdev);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	pci_config_pm_runtime_put(pdev);
	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) struct controller *ctrl = to_ctrl(hotplug_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) u16 slot_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) pci_config_pm_runtime_get(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) pci_config_pm_runtime_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) case PCI_EXP_SLTCTL_ATTN_IND_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) *status = 1; /* On */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) *status = 2; /* Blink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) case PCI_EXP_SLTCTL_ATTN_IND_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) *status = 0; /* Off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) *status = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) void pciehp_get_power_status(struct controller *ctrl, u8 *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) u16 slot_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) case PCI_EXP_SLTCTL_PWR_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) *status = 1; /* On */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) case PCI_EXP_SLTCTL_PWR_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) *status = 0; /* Off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) *status = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
/*
 * Report the MRL (Manually-operated Retention Latch) sensor state from
 * Slot Status.  *@status is 1 when MRLSS is set (per PCIe spec, latch
 * open), 0 otherwise.
 */
void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;

	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
/**
 * pciehp_card_present() - Is the card present
 * @ctrl: PCIe hotplug controller
 *
 * Function checks whether the card is currently present in the slot and
 * in that case returns true. Note it is possible that the card is
 * removed immediately after the check so the caller may need to take
 * this into account.
 *
 * If the hotplug controller itself is not available anymore returns
 * %-ENODEV.
 */
int pciehp_card_present(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;
	int ret;

	/* all-ones read means the controller has dropped off the bus */
	ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
		return -ENODEV;

	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
/**
 * pciehp_card_present_or_link_active() - whether given slot is occupied
 * @ctrl: PCIe hotplug controller
 *
 * Consider the slot occupied when either Presence Detect State or the
 * Link Active bit is set.  The link check is a concession to broken
 * hotplug ports which hardwire Presence Detect State to zero, such as
 * Wilocity's [1ae9:0200].
 *
 * Returns: %1 if the slot is occupied and %0 if it is not. If the
 * hotplug port is not present anymore returns %-ENODEV.
 */
int pciehp_card_present_or_link_active(struct controller *ctrl)
{
	int present = pciehp_card_present(ctrl);

	/* nonzero covers both "occupied" (1) and -ENODEV */
	if (present)
		return present;

	return pciehp_check_link_active(ctrl);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) int pciehp_query_power_fault(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) u16 slot_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return !!(slot_status & PCI_EXP_SLTSTA_PFD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct controller *ctrl = to_ctrl(hotplug_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) pci_config_pm_runtime_get(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) pcie_write_cmd_nowait(ctrl, status << 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) pci_config_pm_runtime_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * pciehp_set_indicators() - set attention indicator, power indicator, or both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) * @ctrl: PCIe hotplug controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * @pwr: one of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * PCI_EXP_SLTCTL_PWR_IND_ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * PCI_EXP_SLTCTL_PWR_IND_BLINK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * PCI_EXP_SLTCTL_PWR_IND_OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * @attn: one of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * PCI_EXP_SLTCTL_ATTN_IND_ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * PCI_EXP_SLTCTL_ATTN_IND_BLINK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * PCI_EXP_SLTCTL_ATTN_IND_OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * unchanged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) u16 cmd = 0, mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) mask |= PCI_EXP_SLTCTL_PIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) cmd |= (attn & PCI_EXP_SLTCTL_AIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) mask |= PCI_EXP_SLTCTL_AIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) pcie_write_cmd_nowait(ctrl, cmd, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) int pciehp_power_on_slot(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) u16 slot_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /* Clear power-fault bit from previous power failures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (slot_status & PCI_EXP_SLTSTA_PFD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) PCI_EXP_SLTSTA_PFD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) ctrl->power_fault_detected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) PCI_EXP_SLTCTL_PWR_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) retval = pciehp_link_enable(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) void pciehp_power_off_slot(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) PCI_EXP_SLTCTL_PWR_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) struct pci_dev *pdev, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * Ignore link changes which occurred while waiting for DPC recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * Could be several if DPC triggered multiple times consecutively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) synchronize_hardirq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (pciehp_poll_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) PCI_EXP_SLTSTA_DLLSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) slot_name(ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) * If the link is unexpectedly down after successful recovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * the corresponding link change may have been ignored above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * Synthesize it to ensure that it is acted on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) down_read_nested(&ctrl->reset_lock, ctrl->depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (!pciehp_check_link_active(ctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) up_read(&ctrl->reset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static irqreturn_t pciehp_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) struct controller *ctrl = (struct controller *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) struct device *parent = pdev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) u16 status, events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * Interrupts only occur in D3hot or shallower and only if enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (pdev->current_state == PCI_D3cold ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) * Keep the port accessible by holding a runtime PM ref on its parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * Defer resume of the parent to the IRQ thread if it's suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) * Mask the interrupt until then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) pm_runtime_get_noresume(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if (!pm_runtime_active(parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) pm_runtime_put(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) disable_irq_nosync(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) atomic_or(RERUN_ISR, &ctrl->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) read_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (status == (u16) ~0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) ctrl_info(ctrl, "%s: no response from device\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) pm_runtime_put(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) * Slot Status contains plain status bits as well as event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * notification bits; right now we only want the event bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) PCI_EXP_SLTSTA_DLLSC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * If we've already reported a power fault, don't report it again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * until we've done something to handle it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (ctrl->power_fault_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) status &= ~PCI_EXP_SLTSTA_PFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) else if (status & PCI_EXP_SLTSTA_PFD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) ctrl->power_fault_detected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) events |= status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (!events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) pm_runtime_put(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * In MSI mode, all event bits must be zero before the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * So re-read the Slot Status register in case a bit was set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * between read and write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) goto read_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) pm_runtime_put(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * Command Completed notifications are not deferred to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * IRQ thread because it may be waiting for their arrival.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (events & PCI_EXP_SLTSTA_CC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) ctrl->cmd_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) wake_up(&ctrl->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (events == PCI_EXP_SLTSTA_CC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) events &= ~PCI_EXP_SLTSTA_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (pdev->ignore_hotplug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /* Save pending events for consumption by IRQ thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) atomic_or(events, &ctrl->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) static irqreturn_t pciehp_ist(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct controller *ctrl = (struct controller *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct pci_dev *pdev = ctrl_dev(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) irqreturn_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) u32 events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) ctrl->ist_running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) pci_config_pm_runtime_get(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /* rerun pciehp_isr() if the port was inaccessible on interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) ret = pciehp_isr(irq, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) enable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (ret != IRQ_WAKE_THREAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) synchronize_hardirq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) events = atomic_xchg(&ctrl->pending_events, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (!events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* Check Attention Button Pressed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) if (events & PCI_EXP_SLTSTA_ABP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) slot_name(ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) pciehp_handle_button_press(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /* Check Power Fault Detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (events & PCI_EXP_SLTSTA_PFD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) PCI_EXP_SLTCTL_ATTN_IND_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * Ignore Link Down/Up events caused by Downstream Port Containment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * if recovery from the error succeeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ctrl->state == ON_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) events &= ~PCI_EXP_SLTSTA_DLLSC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * Disable requests have higher priority than Presence Detect Changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * or Data Link Layer State Changed events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) down_read_nested(&ctrl->reset_lock, ctrl->depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (events & DISABLE_SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) pciehp_handle_disable_request(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) pciehp_handle_presence_or_link_change(ctrl, events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) up_read(&ctrl->reset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) pci_config_pm_runtime_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) ctrl->ist_running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) wake_up(&ctrl->requester);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) static int pciehp_poll(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct controller *ctrl = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* poll for interrupt events or user requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) atomic_read(&ctrl->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) pciehp_ist(IRQ_NOTCONNECTED, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pciehp_poll_time = 2; /* clamp to sane value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) schedule_timeout_idle(pciehp_poll_time * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) static void pcie_enable_notification(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) u16 cmd, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * TBD: Power fault detected software notification support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * Power fault detected software notification is not enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * now, because it caused power fault detected interrupt storm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * on some machines. On those machines, power fault detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * bit in the slot status register was set again immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * when it is cleared in the interrupt service routine, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * next power fault detected interrupt was notified again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * Always enable link events: thus link-up and link-down shall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * always be treated as hotplug and unplug respectively. Enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * presence detect only if Attention Button is not present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) cmd = PCI_EXP_SLTCTL_DLLSCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (ATTN_BUTTN(ctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) cmd |= PCI_EXP_SLTCTL_ABPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cmd |= PCI_EXP_SLTCTL_PDCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (!pciehp_poll_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) PCI_EXP_SLTCTL_PFDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) PCI_EXP_SLTCTL_DLLSCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) pcie_write_cmd_nowait(ctrl, cmd, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static void pcie_disable_notification(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) u16 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) PCI_EXP_SLTCTL_DLLSCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) pcie_write_cmd(ctrl, 0, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) void pcie_clear_hotplug_events(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) void pcie_enable_interrupt(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u16 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) pcie_write_cmd(ctrl, mask, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) void pcie_disable_interrupt(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) u16 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * Mask hot-plug interrupt to prevent it triggering immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * when the link goes inactive (we still get PME when any of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * enabled events is detected). Same goes with Link Layer State
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * changed event which generates PME immediately when the link goes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * inactive so mask it as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) pcie_write_cmd(ctrl, 0, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
/*
 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 * bus reset of the bridge, but at the same time we want to ensure that it is
 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 * disable link state notification and presence detection change notification
 * momentarily, if we see that they could interfere. Also, clear any spurious
 * events after.
 */
int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 stat_mask = 0, ctrl_mask = 0;	/* SLTSTA bits to clear / SLTCTL bits to mask */
	int rc;

	/* probe-only request: report that reset is supported, do nothing */
	if (probe)
		return 0;

	/*
	 * Serialize against the IRQ thread.  _nested with ctrl->depth keeps
	 * lockdep happy when hotplug bridges are themselves below hotplug
	 * bridges and their reset_locks nest.
	 */
	down_write_nested(&ctrl->reset_lock, ctrl->depth);

	/*
	 * With an Attention Button present the driver tolerates presence
	 * detect changes; otherwise PDC would be taken as unplug+replug, so
	 * mask it across the reset as well.
	 */
	if (!ATTN_BUTTN(ctrl)) {
		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
		stat_mask |= PCI_EXP_SLTSTA_PDC;
	}
	/* Link-down during the reset must never be seen as hot-unplug */
	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
	stat_mask |= PCI_EXP_SLTSTA_DLLSC;

	/* Disable the selected notifications before resetting the bus */
	pcie_write_cmd(ctrl, 0, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);

	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);

	/* Clear the spurious events the reset produced, then re-enable */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);

	up_write(&ctrl->reset_lock);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) int pcie_init_notification(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (pciehp_request_irq(ctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) pcie_enable_notification(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) ctrl->notification_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) void pcie_shutdown_notification(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (ctrl->notification_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) pcie_disable_notification(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) pciehp_free_irq(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ctrl->notification_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static inline void dbg_ctrl(struct controller *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct pci_dev *pdev = ctrl->pcie->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) u16 reg16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ctrl_dbg(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, ®16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ctrl_dbg(ctrl, "Slot Status : 0x%04x\n", reg16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, ®16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ctrl_dbg(ctrl, "Slot Control : 0x%04x\n", reg16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) #define FLAG(x, y) (((x) & (y)) ? '+' : '-')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static inline int pcie_hotplug_depth(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct pci_bus *bus = dev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) while (bus->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) bus = bus->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (bus->self && bus->self->is_hotplug_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) depth++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
/*
 * Allocate and initialize the pciehp controller for hot-plug port @dev:
 * read and sanitize the slot capabilities, set up locks, waitqueues and
 * the pushbutton work item, clear stale events, and power off an empty
 * but powered slot.  Returns the controller or NULL on allocation failure.
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, slot_cap2, link_cap;
	u8 poweron;
	struct pci_dev *pdev = dev->port;
	struct pci_bus *subordinate = pdev->subordinate;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return NULL;

	ctrl->pcie = dev;
	/* Lockdep subclass for reset_lock when hotplug bridges are nested */
	ctrl->depth = pcie_hotplug_depth(dev->port);
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);

	/* Platform wants indicators user-controlled: hide them from pciehp */
	if (pdev->hotplug_user_indicators)
		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);

	/*
	 * We assume no Thunderbolt controllers support Command Complete events,
	 * but some controllers falsely claim they do.
	 */
	if (pdev->is_thunderbolt)
		slot_cap |= PCI_EXP_SLTCAP_NCCS;

	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	mutex_init(&ctrl->state_lock);
	init_rwsem(&ctrl->reset_lock);
	init_waitqueue_head(&ctrl->requester);
	init_waitqueue_head(&ctrl->queue);
	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
	dbg_ctrl(ctrl);

	/* Seed slot state from whether a device is already enumerated below */
	down_read(&pci_bus_sem);
	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
	up_read(&pci_bus_sem);

	/* Disable in-band presence detect if the port supports doing so */
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
	if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
		pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
				      PCI_EXP_SLTCTL_IBPD_DISABLE);
		ctrl->inband_presence_disabled = 1;
	}

	/* Some platforms disable in-band presence in firmware (DMI quirk) */
	if (dmi_first_match(inband_presence_disabled_dmi_table))
		ctrl->inband_presence_disabled = 1;

	/* Check if Data Link Layer Link Active Reporting is implemented */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);

	/* Clear all remaining event bits in Slot Status register. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);

	/* One-line summary of every slot capability, '+' present / '-' absent */
	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
		FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");

	/*
	 * If empty slot's power status is on, turn power off. The IRQ isn't
	 * requested yet, so avoid triggering a notification with this command.
	 */
	if (POWER_CTRL(ctrl)) {
		pciehp_get_power_status(ctrl, &poweron);
		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
			pcie_disable_notification(ctrl);
			pciehp_power_off_slot(ctrl);
		}
	}

	return ctrl;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
/*
 * Free a controller allocated by pcie_init().  The pushbutton work item
 * must be cancelled (and waited for) before the memory is released so it
 * cannot run against freed state.
 */
void pciehp_release_ctrl(struct controller *ctrl)
{
	cancel_delayed_work_sync(&ctrl->button_work);
	kfree(ctrl);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void quirk_cmd_compl(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) u32 slot_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (pci_is_pcie(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (slot_cap & PCI_EXP_SLTCAP_HPC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) !(slot_cap & PCI_EXP_SLTCAP_NCCS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) pdev->broken_cmd_compl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
/* Bridges with the Command Completed erratum handled by quirk_cmd_compl() */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);