/*
 * Copyright(c) 2015 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include <linux/module.h>

#include "hfi.h"
#include "chip_registers.h"
#include "aspm.h"
/*
 * This file contains PCIe utility routines.
 */

/*
 * Do all the common PCIe setup and initialization.
 */
int hfi1_pcie_init(struct hfi1_devdata *dd)
{
	int ret;
	struct pci_dev *pdev = dd->pcidev;

	ret = pci_enable_device(pdev);
	if (ret) {
		/*
		 * This can happen (in theory) iff:
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
		return ret;
	}

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret) {
		dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
		goto bail;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		/*
		 * If the 64-bit setup fails, try 32-bit.  Some systems
		 * do not set up 64-bit maps when 2GB or less memory is
		 * installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
			goto bail;
		}
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}
	if (ret) {
		dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
		goto bail;
	}

	pci_set_master(pdev);
	(void)pci_enable_pcie_error_reporting(pdev);
	return 0;

bail:
	hfi1_pcie_cleanup(pdev);
	return ret;
}
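
/*
 * Note: on newer kernels the two-step DMA mask setup above collapses
 * into a single call.  A minimal sketch, assuming a kernel that
 * provides the dma_set_mask_and_coherent() helper (illustrative only,
 * not part of this driver):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		ret = dma_set_mask_and_coherent(&pdev->dev,
 *						DMA_BIT_MASK(32));
 */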

/*
 * Clean up what was done in hfi1_pcie_init()
 */
void hfi1_pcie_cleanup(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
	/*
	 * Release regions should be called after the disable. OK to
	 * call if request regions has not been called or failed.
	 */
	pci_release_regions(pdev);
}

/*
 * Do remaining PCIe setup, once dd is allocated, and save away
 * fields required to re-initialize after a chip reset, or for
 * various other purposes.
 */
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
{
	unsigned long len;
	resource_size_t addr;
	int ret = 0;
	u32 rcv_array_count;

	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	/*
	 * The TXE PIO buffers are at the tail end of the chip space.
	 * Cut them off and map them separately.
	 */

	/* sanity check vs expectations */
	if (len != TXE_PIO_SEND + TXE_PIO_SIZE) {
		dd_dev_err(dd, "chip PIO range does not match\n");
		return -EINVAL;
	}

	dd->kregbase1 = ioremap(addr, RCV_ARRAY);
	if (!dd->kregbase1) {
		dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
		return -ENOMEM;
	}
	dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY);

	/* verify that reads actually work, save revision for reset check */
	dd->revision = readq(dd->kregbase1 + CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "Cannot read chip CSRs\n");
		goto nomem;
	}

	rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
	dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
	dd->base2_start = RCV_ARRAY + rcv_array_count * 8;

	dd->kregbase2 = ioremap(
		addr + dd->base2_start,
		TXE_PIO_SEND - dd->base2_start);
	if (!dd->kregbase2) {
		dd_dev_err(dd, "UC mapping of kregbase2 failed\n");
		goto nomem;
	}
	dd_dev_info(dd, "UC base2: %p for %x\n", dd->kregbase2,
		    TXE_PIO_SEND - dd->base2_start);

	dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE);
	if (!dd->piobase) {
		dd_dev_err(dd, "WC mapping of send buffers failed\n");
		goto nomem;
	}
	dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);

	dd->physaddr = addr; /* used for io_remap, etc. */

	/*
	 * Map the chip's RcvArray as write-combining to allow us
	 * to write an entire cacheline worth of entries in one shot.
	 */
	dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
				     rcv_array_count * 8);
	if (!dd->rcvarray_wc) {
		dd_dev_err(dd, "WC mapping of receive array failed\n");
		goto nomem;
	}
	dd_dev_info(dd, "WC RcvArray: %p for %x\n",
		    dd->rcvarray_wc, rcv_array_count * 8);

	dd->flags |= HFI1_PRESENT;	/* chip.c CSR routines now work */
	return 0;
nomem:
	ret = -ENOMEM;
	hfi1_pcie_ddcleanup(dd);
	return ret;
}
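
/*
 * For reference, the BAR 0 layout implied by the mappings above,
 * as offsets from pci_resource_start(pdev, 0):
 *
 *	[0, RCV_ARRAY)			kregbase1, uncached CSRs
 *	[RCV_ARRAY, base2_start)	rcvarray_wc, RcvArray (WC)
 *	[base2_start, TXE_PIO_SEND)	kregbase2, uncached CSRs
 *	[TXE_PIO_SEND, +TXE_PIO_SIZE)	piobase, PIO send buffers (WC)
 *
 * where base2_start = RCV_ARRAY + rcv_array_count * 8.
 */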

/*
 * Do PCIe cleanup related to dd, after chip-specific cleanup, etc.  Just prior
 * to releasing the dd memory.
 * Void because all of the core pcie cleanup functions are void.
 */
void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
{
	dd->flags &= ~HFI1_PRESENT;
	if (dd->kregbase1)
		iounmap(dd->kregbase1);
	dd->kregbase1 = NULL;
	if (dd->kregbase2)
		iounmap(dd->kregbase2);
	dd->kregbase2 = NULL;
	if (dd->rcvarray_wc)
		iounmap(dd->rcvarray_wc);
	dd->rcvarray_wc = NULL;
	if (dd->piobase)
		iounmap(dd->piobase);
	dd->piobase = NULL;
}

/* return the PCIe link speed from the given link status */
static u32 extract_speed(u16 linkstat)
{
	u32 speed;

	switch (linkstat & PCI_EXP_LNKSTA_CLS) {
	default: /* not defined, assume Gen1 */
	case PCI_EXP_LNKSTA_CLS_2_5GB:
		speed = 2500; /* Gen 1, 2.5GHz */
		break;
	case PCI_EXP_LNKSTA_CLS_5_0GB:
		speed = 5000; /* Gen 2, 5GHz */
		break;
	case PCI_EXP_LNKSTA_CLS_8_0GB:
		speed = 8000; /* Gen 3, 8GHz */
		break;
	}
	return speed;
}

/* return the PCIe link width from the given link status */
static u32 extract_width(u16 linkstat)
{
	return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
}
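
/*
 * Decoding example (illustrative only): a Link Status value of 0x0083
 * has Current Link Speed = 3, so extract_speed() returns 8000 (8.0
 * GT/s), and Negotiated Link Width = (0x0083 & PCI_EXP_LNKSTA_NLW) >>
 * PCI_EXP_LNKSTA_NLW_SHIFT = 0x0080 >> 4 = 8, i.e. a Gen3 x8 link.
 */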

/* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
static void update_lbus_info(struct hfi1_devdata *dd)
{
	u16 linkstat;
	int ret;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	if (ret) {
		dd_dev_err(dd, "Unable to read from PCI config\n");
		return;
	}

	dd->lbus_width = extract_width(linkstat);
	dd->lbus_speed = extract_speed(linkstat);
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
}

/*
 * Read in the current PCIe link width and speed.  Find if the link is
 * Gen3 capable.
 */
int pcie_speeds(struct hfi1_devdata *dd)
{
	u32 linkcap;
	struct pci_dev *parent = dd->pcidev->bus->self;
	int ret;

	if (!pci_is_pcie(dd->pcidev)) {
		dd_dev_err(dd, "Can't find PCI Express capability!\n");
		return -EINVAL;
	}

	/* find if our max speed is Gen3 and parent supports Gen3 speeds */
	dd->link_gen3_capable = 1;

	ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
	if (ret) {
		dd_dev_err(dd, "Unable to read from PCI config\n");
		return pcibios_err_to_errno(ret);
	}

	if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
		dd_dev_info(dd,
			    "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
			    linkcap & PCI_EXP_LNKCAP_SLS);
		dd->link_gen3_capable = 0;
	}

	/*
	 * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
	 */
	if (parent &&
	    (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
	     dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
		dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
		dd->link_gen3_capable = 0;
	}

	/* obtain the link width and current speed */
	update_lbus_info(dd);

	dd_dev_info(dd, "%s\n", dd->lbus_info);

	return 0;
}

/**
 * restore_pci_variables() - Restore command and BARs after a reset has
 * wiped them out
 * @dd: pointer to the HFI1 device data
 *
 * Returns 0 on success, otherwise a negative error value
 */
int restore_pci_variables(struct hfi1_devdata *dd)
{
	int ret;

	ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
				     dd->pcibar0);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
				     dd->pcibar1);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
	if (ret)
		goto error;

	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL,
					 dd->pcie_devctl);
	if (ret)
		goto error;

	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL,
					 dd->pcie_lnkctl);
	if (ret)
		goto error;

	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
					 dd->pcie_devctl2);
	if (ret)
		goto error;

	ret = pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
	if (ret)
		goto error;

	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
		ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
					     dd->pci_tph2);
		if (ret)
			goto error;
	}
	return 0;

error:
	dd_dev_err(dd, "Unable to write to PCI config\n");
	return pcibios_err_to_errno(ret);
}

/**
 * save_pci_variables() - Save BARs and command to rewrite after device
 * reset
 * @dd: pointer to the HFI1 device data
 *
 * Returns 0 on success, otherwise a negative error value
 */
int save_pci_variables(struct hfi1_devdata *dd)
{
	int ret;

	ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
				    &dd->pcibar0);
	if (ret)
		goto error;

	ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
				    &dd->pcibar1);
	if (ret)
		goto error;

	ret = pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
	if (ret)
		goto error;

	ret = pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
	if (ret)
		goto error;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL,
					&dd->pcie_devctl);
	if (ret)
		goto error;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL,
					&dd->pcie_lnkctl);
	if (ret)
		goto error;

	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
					&dd->pcie_devctl2);
	if (ret)
		goto error;

	ret = pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
	if (ret)
		goto error;

	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
		ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
					    &dd->pci_tph2);
		if (ret)
			goto error;
	}
	return 0;

error:
	dd_dev_err(dd, "Unable to read from PCI config\n");
	return pcibios_err_to_errno(ret);
}
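
/*
 * A minimal usage sketch of the save/restore pair around a reset that
 * wipes config space, such as the secondary bus reset used by the Gen3
 * transition below.  The reset helper name here is hypothetical:
 *
 *	ret = save_pci_variables(dd);
 *	if (ret)
 *		return ret;
 *	ret = hypothetical_chip_reset(dd);	// wipes PCI config space
 *	if (!ret)
 *		ret = restore_pci_variables(dd);
 *
 * The saved fields live in struct hfi1_devdata (pcibar0, pcie_devctl,
 * pci_msix0, ...), as read above.
 */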

/*
 * BIOS may not set PCIe bus-utilization parameters for best performance.
 * Check and optionally adjust them to maximize our throughput.
 */
static int hfi1_pcie_caps;
module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
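
/*
 * As decoded by tune_pcie_caps() below, the parameter packs two 3-bit
 * exponent codes: bits 0..2 limit Max Payload Size and bits 4..6 limit
 * Max Read Request Size, each decoded as 128 << code bytes.
 * Example (illustrative only):
 *
 *	pcie_caps = 0x51	// payload code 1, readreq code 5
 *	payload limit = 128 << (0x51 & 7)        = 256 bytes
 *	readreq limit = 128 << ((0x51 >> 4) & 7) = 4096 bytes
 */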

/**
 * tune_pcie_caps() - Code to adjust PCIe capabilities.
 * @dd: Valid device data structure
 *
 */
void tune_pcie_caps(struct hfi1_devdata *dd)
{
	struct pci_dev *parent;
	u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
	u16 rc_mrrs, ep_mrrs, max_mrrs, ectl;
	int ret;

	/*
	 * Turn on extended tags in DevCtl in case the BIOS has turned
	 * them off; this improves WFR SDMA bandwidth.
	 */
	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
	if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
		dd_dev_info(dd, "Enabling PCIe extended tags\n");
		ectl |= PCI_EXP_DEVCTL_EXT_TAG;
		ret = pcie_capability_write_word(dd->pcidev,
						 PCI_EXP_DEVCTL, ectl);
		if (ret)
			dd_dev_info(dd, "Unable to write to PCI config\n");
	}
	/* Find out supported and configured values for parent (root) */
	parent = dd->pcidev->bus->self;
	/*
	 * The driver cannot perform the tuning if it does not have
	 * access to the upstream component.
	 */
	if (!parent) {
		dd_dev_info(dd, "Parent not found\n");
		return;
	}
	if (!pci_is_root_bus(parent->bus)) {
		dd_dev_info(dd, "Parent not root\n");
		return;
	}
	if (!pci_is_pcie(parent)) {
		dd_dev_info(dd, "Parent is not PCI Express capable\n");
		return;
	}
	if (!pci_is_pcie(dd->pcidev)) {
		dd_dev_info(dd, "PCI device is not PCI Express capable\n");
		return;
	}
	rc_mpss = parent->pcie_mpss;
	rc_mps = ffs(pcie_get_mps(parent)) - 8;
	/* Find out supported and configured values for endpoint (us) */
	ep_mpss = dd->pcidev->pcie_mpss;
	ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;

	/* Find max payload supported by root, endpoint */
	if (rc_mpss > ep_mpss)
		rc_mpss = ep_mpss;

	/* If supported is greater than the module param limit, clamp it */
	if (rc_mpss > (hfi1_pcie_caps & 7))
		rc_mpss = hfi1_pcie_caps & 7;
	/* If less than (allowed, supported), bump root payload */
	if (rc_mpss > rc_mps) {
		rc_mps = rc_mpss;
		pcie_set_mps(parent, 128 << rc_mps);
	}
	/* If less than (allowed, supported), bump endpoint payload */
	if (rc_mpss > ep_mps) {
		ep_mps = rc_mpss;
		pcie_set_mps(dd->pcidev, 128 << ep_mps);
	}

	/*
	 * Now the Read Request size.
	 * No field for max supported, but PCIe spec limits it to 4096,
	 * which is code '5' (log2(4096) - 7)
	 */
	max_mrrs = 5;
	if (max_mrrs > ((hfi1_pcie_caps >> 4) & 7))
		max_mrrs = (hfi1_pcie_caps >> 4) & 7;

	max_mrrs = 128 << max_mrrs;
	rc_mrrs = pcie_get_readrq(parent);
	ep_mrrs = pcie_get_readrq(dd->pcidev);

	if (max_mrrs > rc_mrrs) {
		rc_mrrs = max_mrrs;
		pcie_set_readrq(parent, rc_mrrs);
	}
	if (max_mrrs > ep_mrrs) {
		ep_mrrs = max_mrrs;
		pcie_set_readrq(dd->pcidev, ep_mrrs);
	}
}
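
/*
 * MPS/MRRS values move between byte counts and 3-bit exponent codes as
 * code = ffs(bytes) - 8 and bytes = 128 << code.  Worked example
 * (illustrative only): pcie_get_mps() returning 256 gives
 * ffs(256) - 8 = 9 - 8 = 1, and 128 << 1 = 256 round-trips back to the
 * original byte count.
 */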

/* End of PCIe capability tuning */

/*
 * Everything from here through the hfi1_pci_err_handler definition is
 * invoked by the PCI error-handling infrastructure; the handler table
 * is registered with the PCI core.
 */
static pci_ers_result_t
pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	switch (state) {
	case pci_channel_io_normal:
		dd_dev_info(dd, "State Normal, ignoring\n");
		break;

	case pci_channel_io_frozen:
		dd_dev_info(dd, "State Frozen, requesting reset\n");
		pci_disable_device(pdev);
		ret = PCI_ERS_RESULT_NEED_RESET;
		break;

	case pci_channel_io_perm_failure:
		if (dd) {
			dd_dev_info(dd, "State Permanent Failure, disabling\n");
			/* no more register accesses! */
			dd->flags &= ~HFI1_PRESENT;
			hfi1_disable_after_error(dd);
		}
		/* else early, or other problem */
		ret = PCI_ERS_RESULT_DISCONNECT;
		break;

	default: /* shouldn't happen */
		dd_dev_info(dd, "HFI1 PCI errors detected (state %d)\n",
			    state);
		break;
	}
	return ret;
}

static pci_ers_result_t
pci_mmio_enabled(struct pci_dev *pdev)
{
	u64 words = 0U;
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	if (dd && dd->pport) {
		words = read_port_cntr(dd->pport, C_RX_WORDS, CNTR_INVALID_VL);
		if (words == ~0ULL)
			ret = PCI_ERS_RESULT_NEED_RESET;
		dd_dev_info(dd,
			    "HFI1 mmio_enabled function called, read wordscntr %llx, returning %d\n",
			    words, ret);
	}
	return ret;
}

static pci_ers_result_t
pci_slot_reset(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	dd_dev_info(dd, "HFI1 slot_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static void
pci_resume(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	dd_dev_info(dd, "HFI1 resume function called\n");
	/*
	 * Running jobs will fail, since this reset is asynchronous,
	 * unlike a sysfs-requested reset.  Better than doing nothing.
	 */
	hfi1_init(dd, 1); /* same as re-init after reset */
}

const struct pci_error_handlers hfi1_pci_err_handler = {
	.error_detected = pci_error_detected,
	.mmio_enabled = pci_mmio_enabled,
	.slot_reset = pci_slot_reset,
	.resume = pci_resume,
};
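
/*
 * These callbacks take effect once the table is hooked into the
 * driver's struct pci_driver, which is defined elsewhere in this
 * driver, e.g.:
 *
 *	static struct pci_driver hfi1_pci_driver = {
 *		...
 *		.err_handler = &hfi1_pci_err_handler,
 *	};
 *
 * The mmio_enabled check above reads a receive word counter; an
 * all-ones value means MMIO reads are failing, so a reset is requested.
 */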

/*============================================================================*/
/* PCIe Gen3 support */

/*
 * This code is separated out because it is expected to be removed in the
 * final shipping product.  If not, then it will be revisited and items
 * will be moved to more standard locations.
 */

/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_STS field values */
#define DL_STATUS_HFI0 0x1	/* hfi0 firmware download complete */
#define DL_STATUS_HFI1 0x2	/* hfi1 firmware download complete */
#define DL_STATUS_BOTH 0x3	/* hfi0 and hfi1 firmware download complete */

/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_ERR field values */
#define DL_ERR_NONE 0x0		/* no error */
#define DL_ERR_SWAP_PARITY 0x1	/* parity error in SerDes interrupt */
				/*   or response data */
#define DL_ERR_DISABLED 0x2	/* hfi disabled */
#define DL_ERR_SECURITY 0x3	/* security check failed */
#define DL_ERR_SBUS 0x4		/* SBus status error */
#define DL_ERR_XFR_PARITY 0x5	/* parity error during ROM transfer */

/* gasket block secondary bus reset delay */
#define SBR_DELAY_US 200000	/* 200ms */

static uint pcie_target = 3;
module_param(pcie_target, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");

static uint pcie_force;
module_param(pcie_force, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_force, "Force driver to do a PCIe firmware download even if already at target speed");

static uint pcie_retry = 5;
module_param(pcie_retry, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested speed");

#define UNSET_PSET 255
#define DEFAULT_DISCRETE_PSET 2	/* discrete HFI */
#define DEFAULT_MCP_PSET 6	/* MCP HFI */
static uint pcie_pset = UNSET_PSET;
module_param(pcie_pset, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");

static uint pcie_ctle = 3; /* discrete on, integrated on */
module_param(pcie_ctle, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");

/* equalization columns */
#define PREC 0
#define ATTN 1
#define POST 2

/* discrete silicon preliminary equalization values */
static const u8 discrete_preliminary_eq[11][3] = {
	/* prec attn post */
	{ 0x00, 0x00, 0x12 },	/* p0 */
	{ 0x00, 0x00, 0x0c },	/* p1 */
	{ 0x00, 0x00, 0x0f },	/* p2 */
	{ 0x00, 0x00, 0x09 },	/* p3 */
	{ 0x00, 0x00, 0x00 },	/* p4 */
	{ 0x06, 0x00, 0x00 },	/* p5 */
	{ 0x09, 0x00, 0x00 },	/* p6 */
	{ 0x06, 0x00, 0x0f },	/* p7 */
	{ 0x09, 0x00, 0x09 },	/* p8 */
	{ 0x0c, 0x00, 0x00 },	/* p9 */
	{ 0x00, 0x00, 0x18 },	/* p10 */
};

/* integrated silicon preliminary equalization values */
static const u8 integrated_preliminary_eq[11][3] = {
	/* prec attn post */
	{ 0x00, 0x1e, 0x07 },	/* p0 */
	{ 0x00, 0x1e, 0x05 },	/* p1 */
	{ 0x00, 0x1e, 0x06 },	/* p2 */
	{ 0x00, 0x1e, 0x04 },	/* p3 */
	{ 0x00, 0x1e, 0x00 },	/* p4 */
	{ 0x03, 0x1e, 0x00 },	/* p5 */
	{ 0x04, 0x1e, 0x00 },	/* p6 */
	{ 0x03, 0x1e, 0x06 },	/* p7 */
	{ 0x03, 0x1e, 0x04 },	/* p8 */
	{ 0x05, 0x1e, 0x00 },	/* p9 */
	{ 0x00, 0x1e, 0x0a },	/* p10 */
};

static const u8 discrete_ctle_tunings[11][4] = {
	/* DC LF HF BW */
	{ 0x48, 0x0b, 0x04, 0x04 },	/* p0 */
	{ 0x60, 0x05, 0x0f, 0x0a },	/* p1 */
	{ 0x50, 0x09, 0x06, 0x06 },	/* p2 */
	{ 0x68, 0x05, 0x0f, 0x0a },	/* p3 */
	{ 0x80, 0x05, 0x0f, 0x0a },	/* p4 */
	{ 0x70, 0x05, 0x0f, 0x0a },	/* p5 */
	{ 0x68, 0x05, 0x0f, 0x0a },	/* p6 */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p7 */
	{ 0x48, 0x09, 0x06, 0x06 },	/* p8 */
	{ 0x60, 0x05, 0x0f, 0x0a },	/* p9 */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p10 */
};

static const u8 integrated_ctle_tunings[11][4] = {
	/* DC LF HF BW */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p0 */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p1 */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p2 */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p3 */
	{ 0x58, 0x0a, 0x05, 0x05 },	/* p4 */
	{ 0x48, 0x0a, 0x05, 0x05 },	/* p5 */
	{ 0x40, 0x0a, 0x05, 0x05 },	/* p6 */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p7 */
	{ 0x38, 0x0f, 0x00, 0x00 },	/* p8 */
	{ 0x38, 0x09, 0x06, 0x06 },	/* p9 */
	{ 0x38, 0x0e, 0x01, 0x01 },	/* p10 */
};

/* helper to format the value to write to hardware */
#define eq_value(pre, curr, post) \
	((((u32)(pre)) << \
	  PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT) \
	 | (((u32)(curr)) << PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT) \
	 | (((u32)(post)) << \
	    PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * Load the given EQ preset table into the PCIe hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) u8 div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct pci_dev *pdev = dd->pcidev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) u32 hit_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) u32 violation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) u8 c_minus1, c0, c_plus1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) for (i = 0; i < 11; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /* set index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) pci_write_config_dword(pdev, PCIE_CFG_REG_PL103, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* write the value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) c_minus1 = eq[i][PREC] / div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) c_plus1 = eq[i][POST] / div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) eq_value(c_minus1, c0, c_plus1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* check if these coefficients violate EQ rules */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ret = pci_read_config_dword(dd->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) PCIE_CFG_REG_PL105, &violation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) dd_dev_err(dd, "Unable to read from PCI config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) hit_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (violation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) & PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (hit_error == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) "Gen3 EQ Table Coefficient rule violations\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) dd_dev_err(dd, " prec attn post\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) dd_dev_err(dd, " p%02d: %02x %02x %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) i, (u32)eq[i][0], (u32)eq[i][1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) (u32)eq[i][2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) dd_dev_err(dd, " %02x %02x %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) (u32)c_minus1, (u32)c0, (u32)c_plus1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) hit_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (hit_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
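/*
 * Sanity check on the coefficient math in load_eq_table(), using
 * made-up numbers: for fs = 24, div = 3 and a preset entry with
 * prec = 6 and post = 9, we get c_minus1 = 6 / 3 = 2,
 * c_plus1 = 9 / 3 = 3 and c0 = 24 - 2 - 3 = 19, so
 * c_minus1 + c0 + c_plus1 always sums back to the full swing fs.
 */
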
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * Steps to be done after the PCIe firmware is downloaded and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * before the SBR for the PCIe Gen3 transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * The SBus resource is already being held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static void pcie_post_steps(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) set_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * Write to the PCIe PCSes to set the G3_LOCKED_NEXT bits to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * This avoids a spurious framing error that can otherwise be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * generated by the MAC layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * Use individual addresses since no broadcast is set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) for (i = 0; i < NUM_PCIE_SERDES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) sbus_request(dd, pcie_pcs_addrs[dd->hfi1_id][i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 0x03, WRITE_SBUS_RECEIVER, 0x00022132);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) clear_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * Trigger a secondary bus reset (SBR) on ourselves using our parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * Based on pci_parent_bus_reset() which is not exported by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * kernel core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static int trigger_sbr(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct pci_dev *dev = dd->pcidev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* need a parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!dev->bus->self) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) dd_dev_err(dd, "%s: no parent device\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* should not be anyone else on the bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) list_for_each_entry(pdev, &dev->bus->devices, bus_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (pdev != dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) "%s: another device is on the same bus\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * This is a workaround to do an SBR during probe time. A new API needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * to be implemented to provide a cleaner interface, but this fixes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * the current brokenness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return pci_bridge_secondary_bus_reset(dev->bus->self);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
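/*
 * Note: pci_bridge_secondary_bus_reset() asserts the Secondary Bus
 * Reset bit in the parent bridge's Bridge Control register and then
 * waits for the secondary bus to become accessible again, which is
 * exactly the SBR this recipe needs.
 */
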
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * Write the given gasket interrupt register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) u16 code, u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
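/*
 * Illustration of the packing above (shift values are hypothetical;
 * the real ones are the ASIC_PCIE_SD_INTRPT_LIST_* definitions): if
 * the code shift were 16 and the data shift 0, then
 * write_gasket_interrupt(dd, 0, 0x0006, 0x0050) would write the
 * 64-bit value 0x00060050 to ASIC_PCIE_SD_INTRPT_LIST + 0.
 */
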
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * Tell the gasket logic how to react to the reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static void arm_gasket_logic(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) reg = (((u64)1 << dd->hfi1_id) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* read back to push the write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * CCE_PCIE_CTRL long name helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * We redefine these shorter macros to use in the code while leaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * chip_registers.h to be autogenerated from the hardware spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) #define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) #define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) #define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) #define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) #define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) #define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) #define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) #define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) u64 pcie_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u64 xmt_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) u64 xmt_margin_oe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) u64 lane_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) u64 lane_bundle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * For Discrete, use full-swing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * - PCIe TX defaults to full-swing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * Leave this register as default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * For Integrated, use half-swing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * - Copy xmt_margin and xmt_margin_oe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * from Gen1/Gen2 to Gen3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* extract initial fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) & MARGIN_GEN1_GEN2_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) & MARGIN_G1_G2_OVERWRITE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) & LANE_BUNDLE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * For A0, EFUSE values are not set. Override with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * correct values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (is_ax(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * xmt_margin and OverwriteEnable should be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * same for Gen1/Gen2 and Gen3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) xmt_margin = 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) xmt_margin_oe = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) lane_delay = 0xF; /* Delay 240ns. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) lane_bundle = 0x0; /* Set to 1 lane. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* overwrite existing values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) | (xmt_margin << MARGIN_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) | (lane_delay << LANE_DELAY_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) | (lane_bundle << LANE_BUNDLE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) fname, pcie_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
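/*
 * Sketch of the field copy above, with hypothetical bit positions (the
 * real ones are the CCE_PCIE_CTRL_* definitions): if the Gen1/Gen2
 * margin lived at bits [10:8] and the Gen3 margin at bits [2:0], then a
 * Gen1/Gen2 margin of 0x5 would be extracted as (pcie_ctrl >> 8) & 0x7
 * and written back as (0x5 << 8) | (0x5 << 0), programming the same
 * margin for every speed.
 */
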
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * Do all the steps needed to transition the PCIe link to Gen3 speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int do_pcie_gen3_transition(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct pci_dev *parent = dd->pcidev->bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) u64 fw_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) u64 reg, therm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) u32 reg32, fs, lf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) u32 status, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int do_retry, retry_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int intnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) uint default_pset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) uint pset = pcie_pset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) u16 target_vector, target_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) u16 lnkctl2, vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) u8 div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) const u8 (*eq)[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) const u8 (*ctle_tunings)[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) uint static_ctle_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int return_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) u32 target_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* PCIe Gen3 is for the ASIC only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (dd->icode != ICODE_RTL_SILICON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (pcie_target == 1) { /* target Gen1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) target_speed = 2500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) } else if (pcie_target == 2) { /* target Gen2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) target_speed = 5000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) } else if (pcie_target == 3) { /* target Gen3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) target_speed = 8000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* off or invalid target - skip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dd_dev_info(dd, "%s: Skipping PCIe transition\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
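/*
 * The numeric target speeds above are link rates in MT/s: 2500, 5000
 * and 8000 correspond to Gen1 2.5 GT/s, Gen2 5.0 GT/s and Gen3
 * 8.0 GT/s respectively. They are compared directly against
 * dd->lbus_speed below.
 */
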
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* if already at target speed, done (unless forced) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (dd->lbus_speed == target_speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) pcie_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) pcie_force ? "re-doing anyway" : "skipping");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!pcie_force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * The driver cannot do the transition if it has no access to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * upstream component.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (!parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) dd_dev_info(dd, "%s: no upstream device, can't do Gen3 transition\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Previous Gen1/Gen2 bus width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) target_width = dd->lbus_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * Do the Gen3 transition. Steps are those of the PCIe Gen3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * recipe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /* step 1: pcie link working in gen1/gen2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /* step 2: if either side is not capable of Gen3, done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (pcie_target == 3 && !dd->link_gen3_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) dd_dev_err(dd, "The PCIe link is not Gen3 capable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ret = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) goto done_no_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* hold the SBus resource across the firmware download and SBR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) dd_dev_err(dd, "%s: unable to acquire SBus resource\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* make sure thermal polling is not causing interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (therm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) dd_dev_info(dd, "%s: Disabled therm polling\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* the SBus download will reset the spico for thermal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* step 3: download SBus Master firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* step 4: download PCIe Gen3 SerDes firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) dd_dev_info(dd, "%s: downloading firmware\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) ret = load_pcie_firmware(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* do not proceed if the firmware cannot be downloaded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* step 5: set up device parameter settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dd_dev_info(dd, "%s: setting PCIe registers\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * PcieCfgSpcie1 - Link Control 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * Leave at reset value. No need to set PerfEq - link equalization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * will be performed automatically after the SBR when the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * speed is 8GT/s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /* clear all 16 per-lane error bits (PCIe Lane Error Status is write-1-to-clear) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /* step 5a: Set Synopsys Port Logic registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * PcieCfgRegPl2 - Port Force Link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * Set the low power field to 0x10 to avoid unnecessary power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * management messages. All other fields are zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) reg32 = 0x10ul << PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL2, reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * PcieCfgRegPl100 - Gen3 Control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * turn on PcieCfgRegPl100.EqEieosCnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * Everything else zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL100, reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * PcieCfgRegPl101 - Gen3 EQ FS and LF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * PcieCfgRegPl102 - Gen3 EQ Presets to Coefficients Mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * PcieCfgRegPl103 - Gen3 EQ Preset Index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * PcieCfgRegPl105 - Gen3 EQ Status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * Give initial EQ settings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0) { /* discrete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* 1000 mV, FS = 24, LF = 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) fs = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) lf = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) div = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) eq = discrete_preliminary_eq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) default_pset = DEFAULT_DISCRETE_PSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ctle_tunings = discrete_ctle_tunings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* bit 0 - discrete on/off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) static_ctle_mode = pcie_ctle & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* 400 mV, FS = 29, LF = 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) fs = 29;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) lf = 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) eq = integrated_preliminary_eq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) default_pset = DEFAULT_MCP_PSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ctle_tunings = integrated_ctle_tunings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* bit 1 - integrated on/off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static_ctle_mode = (pcie_ctle >> 1) & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) (fs <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) (lf <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) ret = load_eq_table(dd, eq, fs, div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * PcieCfgRegPl106 - Gen3 EQ Control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * Set Gen3EqPsetReqVec, leave other fields 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (pset == UNSET_PSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) pset = default_pset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (pset > 10) { /* valid range is 0-10, inclusive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) __func__, pset, default_pset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) pset = default_pset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) ((1 << pset) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
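/*
 * Example of the request vector above: for pset == 2 the field value
 * is (1 << 2) = 0x004, a one-hot mask asking the equalization logic to
 * request preset P2 (each bit position selects one preset).
 */
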
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * step 5b: Do post firmware download steps via SBus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) dd_dev_info(dd, "%s: doing pcie post steps\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) pcie_post_steps(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * step 5c: Program gasket interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* set the Rx Bit Rate to REFCLK ratio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /* disable pCal for PCIe Gen3 RX equalization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* select adaptive or static CTLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) write_gasket_interrupt(dd, intnum++, 0x0026,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 0x5b01 | (static_ctle_mode << 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Enable iCal for PCIe Gen3 RX equalization, and set which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * evaluation of RX_EQ_EVAL will launch the iCal procedure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (static_ctle_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* apply static CTLE tunings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) pcie_dc = ctle_tunings[pset][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) pcie_lf = ctle_tunings[pset][1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) pcie_hf = ctle_tunings[pset][2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) pcie_bw = ctle_tunings[pset][3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* terminate list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * step 5d: program XMT margin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) write_xmt_margin(dd, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * step 5e: disable active state power management (ASPM). It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * will be enabled again later if required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) aspm_hw_disable_l1(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * step 5f: clear DirectSpeedChange
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * PcieCfgRegPl67.DirectSpeedChange must be zero to prevent the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * change in the speed target from starting before we are ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * This field defaults to 0 and we are not changing it, so nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * needs to be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* step 5g: Set target link speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * Set the target link speed on both the device and the parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * On setting the parent: Some system BIOSs "helpfully" set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * parent target speed to Gen2 to match the ASIC's initial speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * We can set the target Gen3 because we have already checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * that it is Gen3 capable earlier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ret = pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) dd_dev_err(dd, "Unable to read from PCI config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) (u32)lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /* only write to parent if target is not as high as ours */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) lnkctl2 |= target_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) (u32)lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) ret = pcie_capability_write_word(parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) PCI_EXP_LNKCTL2, lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) dd_dev_err(dd, "Unable to write to PCI config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) dd_dev_info(dd, "%s: setting target link speed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) dd_dev_err(dd, "Unable to read from PCI config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) (u32)lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) lnkctl2 |= target_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) (u32)lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) dd_dev_err(dd, "Unable to write to PCI config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /* step 5h: arm gasket logic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* hold DC in reset across the SBR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) (void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* save firmware control across the SBR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) dd_dev_info(dd, "%s: arming gasket logic\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) arm_gasket_logic(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * step 6: quiesce PCIe link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * The chip has already been reset, so there will be no traffic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * from the chip. Linux has no easy way to enforce that it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * not try to access the device, so we just have to hope it stays
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * away while the reset is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * step 7: initiate the secondary bus reset (SBR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * step 8: hardware brings the links back up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * step 9: wait for link speed transition to be complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) dd_dev_info(dd, "%s: calling trigger_sbr\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) ret = trigger_sbr(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* step 10: decide what to do next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /* check if we can read PCI space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) "%s: read of VendorID failed after SBR, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (vendor == 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) dd_dev_info(dd, "%s: VendorID is all 1s after SBR\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
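/*
 * A VendorID of 0xffff is the all-1s pattern PCI config reads return
 * when the device does not respond, i.e. the link did not come back
 * after the reset.
 */
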
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /* restore PCI space registers we know were reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) ret = restore_pci_variables(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) dd_dev_err(dd, "%s: Could not restore PCI variables\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* restore firmware control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * Check the gasket block status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * This is the first CSR read after the SBR. If the read returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * all 1s (fails), the link did not make it back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * Once we're sure we can read and write, clear the DC reset after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * the SBR. Then check for any per-lane errors. Then look over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * the status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) reg = read_csr(dd, ASIC_PCIE_SD_HOST_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) dd_dev_info(dd, "%s: gasket block status: 0x%llx\n", __func__, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (reg == ~0ull) { /* PCIe read failed/timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) dd_dev_err(dd, "SBR failed - unable to read from device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) ret = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /* clear the DC reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) write_csr(dd, CCE_DC_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /* Set the LED off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) setextled(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /* check for any per-lane errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) dd_dev_err(dd, "Unable to read from PCI config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* extract status, look for our HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) status = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if ((status & (1 << dd->hfi1_id)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) "%s: gasket status 0x%x, expecting 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) __func__, status, 1 << dd->hfi1_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /* extract error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) err = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) dd_dev_err(dd, "%s: gasket error %d\n", __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* update our link information cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) update_lbus_info(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dd->lbus_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (dd->lbus_speed != target_speed ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) dd->lbus_width < target_width) { /* not target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /* maybe retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) do_retry = retry_count < pcie_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) do_retry ? ", retrying" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) retry_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (do_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) msleep(100); /* allow time to settle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (therm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) dd_dev_info(dd, "%s: Re-enabled therm polling\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) release_chip_resource(dd, CR_SBUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) done_no_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* return no error if it is OK to be at current speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (ret && !return_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) dd_dev_err(dd, "Proceeding at current PCIe speed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) dd_dev_info(dd, "%s: done\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }