/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"
#include "fault.h"
#include "netdev.h"

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
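
/*
 * Worked sketch of the arithmetic in the comment above.  This helper is
 * purely illustrative and is not part of the original driver: at
 * 12.5 GB/s the link moves 12.5 bytes per ns, so ns = bytes * 2 / 25.
 */
static inline uint hfi1_example_pkt_time_ns(uint payload_bytes, uint hdr_bytes)
{
	/* (10 * 1024 + 64) * 2 / 25 == 824, matching the default above */
	return ((payload_bytes + hdr_bytes) * 2) / 25;
}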

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
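
/*
 * Illustrative sketch, not part of the original file: flag tables built
 * from FLAG_ENTRY()/FLAG_ENTRY0() are typically walked like this to
 * decode a hardware status register into readable strings.  The helper
 * name is hypothetical.
 */
static inline void hfi1_example_decode_flags(u64 status,
					     const struct flag_table *table,
					     size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (status & table[i].flag)
			pr_info("%s\n", table[i].str);
}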

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1

/*
 * RSM instance allocation
 *   0 - User Fecn Handling
 *   1 - Vnic
 *   2 - AIP
 *   3 - Verbs
 */
#define RSM_INS_FECN 0
#define RSM_INS_VNIC 1
#define RSM_INS_AIP 2
#define RSM_INS_VERBS 3

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
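
/*
 * Illustrative helper, not part of the original driver: pulling the HFI
 * id out of a GUID using GUID_HFI_INDEX_SHIFT.  The single-bit width is
 * an assumption of this sketch (one bit distinguishing the two HFIs).
 */
static inline u32 hfi1_example_guid_hfi_index(u64 guid)
{
	/* assumed 1-bit id field; the real width is defined by hardware */
	return (u32)((guid >> GUID_HFI_INDEX_SHIFT) & 0x1);
}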

/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
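
/*
 * Worked example of the offset encoding above (added for clarity, not
 * in the original file): a match/select offset packs the quad-word
 * index into the bits at QW_SHIFT and above, and the bit offset within
 * that QW into bits 5..0.  Hence:
 *
 *	LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *	LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56
 *	QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */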

/* RSM fields for AIP */
/* LRH.BTH above is reused for this rule */

/* BTH.DESTQP: QW 1, OFFSET 16 for match */
#define BTH_DESTQP_QW 1ull
#define BTH_DESTQP_BIT_OFFSET 16ull
#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
#define BTH_DESTQP_MASK 0xFFull
#define BTH_DESTQP_VALUE 0x81ull

/* DETH.SQPN: QW 1 Offset 56 for select */
/* We use the 8 most significant Source QPN bits as entropy for AIP */
#define DETH_AIP_SQPN_QW 3ull
#define DETH_AIP_SQPN_BIT_OFFSET 56ull
#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
#define DETH_AIP_SQPN_SELECT_OFFSET \
	DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)

/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
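
/*
 * Illustrative use of SC2VL_VAL (hypothetical mapping, not taken from
 * the original file): packing an identity SC0..SC7 -> VL0..VL7 mapping
 * for the first SC2VL CSR would look like
 *
 *	SC2VL_VAL(0,
 *		  0, 0, 1, 1, 2, 2, 3, 3,
 *		  4, 4, 5, 5, 6, 6, 7, 7)
 *
 * where each (scN, val) pair token-pastes into the SEND_SC2VLT0_SCn
 * field shift and deposits the VL number there.
 */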

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)
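
/*
 * DC_SC_VL_VAL builds one 16-entry half of the DCC SC-to-VL table the
 * same way; `range` names the CSR half being filled (e.g. 15_0 for
 * entries 0-15 -- the name is an assumption of this sketch).  A
 * hypothetical identity mapping:
 *
 *	DC_SC_VL_VAL(15_0,
 *		     0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
 *		     8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
 *		     14, 14, 15, 15)
 */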

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
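
/*
 * Sketch of how an aggregate like ALL_FROZE is typically consumed
 * (hypothetical helper, not part of the original driver): freeze
 * handling polls CceStatus until every sub-block reports frozen.
 */
static inline bool hfi1_example_all_frozen(u64 cce_status)
{
	return (cce_status & ALL_FROZE) == ALL_FROZE;
}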

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};
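
/*
 * Usage sketch for the table above, reusing the hypothetical decode
 * helper introduced earlier; read_csr() and CCE_ERR_STATUS are the
 * driver's own CSR accessor and register offset:
 *
 *	hfi1_example_decode_flags(read_csr(dd, CCE_ERR_STATUS),
 *				  cce_err_status_flags,
 *				  ARRAY_SIZE(cce_err_status_flags));
 */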

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
		SEC_WRITE_DROPPED,
		SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
		0,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
		0,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
		0,
		SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
		0,
		SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
		SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
		SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
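
/*
 * Hypothetical illustration (not in the original driver): testing
 * whether a PIO error status demands an SPC freeze.
 */
static inline bool hfi1_example_pio_err_freezes(u64 pio_err_status)
{
	return (pio_err_status & ALL_PIO_FREEZE_ERR) != 0;
}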

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/* 4-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
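
/*
 * Sketch (hypothetical helper, not in the original driver): an egress
 * error is folded into the PortXmitDiscard counter exactly when it hits
 * one of the bits in the mask above.
 */
static inline bool hfi1_example_counts_as_discard(u64 egress_err_info)
{
	return (egress_err_info & PORT_DISCARD_EGRESS_ERRS) != 0;
}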
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * TXE Egress Error flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static struct flag_table egress_err_status_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /* 2 reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /* 6 reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) SEES(TX_PIO_LAUNCH_INTF_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) /* 9-10 reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) SEES(TX_SDMA0_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) SEES(TX_SDMA1_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) SEES(TX_SDMA2_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) SEES(TX_SDMA3_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) SEES(TX_SDMA4_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) SEES(TX_SDMA5_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) SEES(TX_SDMA6_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) SEES(TX_SDMA7_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) SEES(TX_SDMA8_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) SEES(TX_SDMA9_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) SEES(TX_SDMA10_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) SEES(TX_SDMA11_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) SEES(TX_SDMA12_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) SEES(TX_SDMA13_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) SEES(TX_SDMA14_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) SEES(TX_SDMA15_DISALLOWED_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) * TXE Egress Error Info flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) static struct flag_table egress_err_info_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) /* 0*/ FLAG_ENTRY0("Reserved", 0ull),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) };
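/*
 * For reference, SEEI(VL) above expands to
 * SEND_EGRESS_ERR_INFO_VL_ERR_SMASK; the numbered comments on the
 * entries track the corresponding bit positions in the
 * SendEgressErrInfo register.
 */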
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* TXE Egress errors that cause an SPC freeze */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) #define ALL_TXE_EGRESS_FREEZE_ERR \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) | SEES(TX_LAUNCH_CSR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) | SEES(TX_SBRD_CTL_CSR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) | SEES(TX_CONFIG_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) | SEES(TX_CREDIT_RETURN_PARITY))
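/*
 * A sketch of how this mask is typically consumed (mirroring the
 * egress error handler later in this file, not verbatim code): any
 * freeze-class bit in the egress error status escalates to SPC freeze
 * handling before the individual bits are reported:
 *
 *    if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
 *        start_freeze_handling(dd->pport, 0);
 */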
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * TXE Send error flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) static struct flag_table send_err_status_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * TXE Send Context Error flags and consequences
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static struct flag_table sc_err_status_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) /* 0*/ FLAG_ENTRY("InconsistentSop",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) SEC_PACKET_DROPPED | SEC_SC_HALTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /* 1*/ FLAG_ENTRY("DisallowedPacket",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) SEC_PACKET_DROPPED | SEC_SC_HALTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) /* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) SEC_WRITE_DROPPED | SEC_SC_HALTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) /* 3*/ FLAG_ENTRY("WriteOverflow",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) SEC_WRITE_DROPPED | SEC_SC_HALTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) /* 4*/ FLAG_ENTRY("WriteOutOfBounds",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) SEC_WRITE_DROPPED | SEC_SC_HALTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /* 5-63 reserved*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) };
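/*
 * Unlike FLAG_ENTRY0, FLAG_ENTRY records a second value alongside the
 * status bit: here the SEC_* consequence bits. A minimal sketch of how
 * a handler could act on them, assuming the consequences land in the
 * table's extra field:
 *
 *    u64 consequences = sc_err_status_flags[i].extra;
 *
 *    if (consequences & SEC_SC_HALTED)
 *        ...the send context is halted and must be restarted...
 */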
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * RXE Receive Error flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static struct flag_table rxe_err_status_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) /* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) /*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) RXES(RBUF_BLOCK_LIST_READ_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) RXES(RBUF_BLOCK_LIST_READ_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) RXES(RBUF_CSR_QENT_CNT_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) RXES(RBUF_CSR_QVLD_BIT_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) RXES(RBUF_FL_INITDONE_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) /*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) RXES(LOOKUP_DES_PART1_UNC_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) RXES(LOOKUP_DES_PART2_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* RXE errors that will trigger an SPC freeze */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) #define ALL_RXE_FREEZE_ERR \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) #define RXE_FREEZE_ABORT_MASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
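/*
 * Roughly how the two masks above interact (a sketch of the RXE error
 * handler's logic, not the verbatim code): a freeze-class error starts
 * SPC freeze handling, and the abort subset marks that freeze as
 * non-recoverable:
 *
 *    if (reg & ALL_RXE_FREEZE_ERR) {
 *        int flags = 0;
 *
 *        if (reg & RXE_FREEZE_ABORT_MASK)
 *            flags = FREEZE_ABORT;
 *        start_freeze_handling(dd->pport, flags);
 *    }
 */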
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * DCC Error Flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static struct flag_table dcc_err_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * LCB error flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static struct flag_table lcb_err_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) LCBE(ALL_LNS_FAILED_REINIT_TEST)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) LCBE(REDUNDANT_FLIT_PARITY_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * DC8051 Error Flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static struct flag_table dc8051_err_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * DC8051 Information Error flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static struct flag_table dc8051_info_err_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) FLAG_ENTRY0("Serdes internal loopback failure",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) FAILED_SERDES_INTERNAL_LOOPBACK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) FLAG_ENTRY0("External Device Request Timeout",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) EXTERNAL_DEVICE_REQ_TIMEOUT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * DC8051 Information Host Information flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static struct flag_table dc8051_info_host_msg_flags[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) FLAG_ENTRY0("Host request done", 0x0001),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) FLAG_ENTRY0("BC SMA message", 0x0004),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) FLAG_ENTRY0("External device config request", 0x0020),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) FLAG_ENTRY0("LinkUp achieved", 0x0080),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) FLAG_ENTRY0("Link going down", 0x0100),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) FLAG_ENTRY0("Link width downgraded", 0x0200),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static u32 encoded_size(u32 size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) u8 *continuous);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void read_vc_remote_link_width(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) u8 *remote_tx_rate, u16 *link_widths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) u8 *flag_bits, u16 *link_widths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) u8 *device_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) u8 *tx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) u8 *rx_polarity_inversion, u8 *max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static void handle_sdma_eng_err(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) unsigned int context, u64 err_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static void handle_dcc_err(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) unsigned int context, u64 err_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static void handle_lcb_err(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) unsigned int context, u64 err_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static void set_partition_keys(struct hfi1_pportdata *ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static const char *link_state_name(u32 state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) u32 state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) u64 *out_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static int thermal_init(struct hfi1_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static void handle_temp_err(struct hfi1_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static void dc_shutdown(struct hfi1_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static void dc_start(struct hfi1_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) unsigned int *np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * Error interrupt table entry. This is used as input to the interrupt
* "clear down" routine used for all second tier error interrupt registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * Second tier interrupt registers have a single bit representing them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * in the top-level CceIntStatus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct err_reg_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) u32 status; /* status CSR offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) u32 clear; /* clear CSR offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) u32 mask; /* mask CSR offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) const char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) };
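/*
 * Sketch of the "clear down" pattern an entry drives (hedged; the real
 * routine lives later in this file): read the status CSR, write the
 * value back to its clear CSR so the source can interrupt again, then
 * let the handler decode what was seen:
 *
 *    u64 reg = read_csr(dd, eri->status);
 *
 *    write_csr(dd, eri->clear, reg);
 *    if (eri->handler)
 *        eri->handler(dd, source, reg);
 */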
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * Helpers for building HFI and DC error interrupt table entries. Different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * helpers are needed because of inconsistent register names.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #define EE(reg, handler, desc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) handler, desc }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) #define DC_EE1(reg, handler, desc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) #define DC_EE2(reg, handler, desc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
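/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *    { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *      handle_cce_err, "CceErr" }
 * while DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err") expands to
 *    { DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN,
 *      handle_dcc_err, "DCC Err" }
 */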
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * Table of the "misc" grouping of error interrupts. Each entry refers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * another register containing more information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /* 3*/ { 0, 0, 0, NULL }, /* reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* the rest are reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * Index into the Various section of the interrupt sources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * corresponding to the Critical Temperature interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) #define TCRIT_INT_SOURCE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * SDMA error interrupt entry - refers to another register containing more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static const struct err_reg_info sdma_eng_err =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static const struct err_reg_info various_err[NUM_VARIOUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* rest are reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
* register cannot be derived from the MTU value because 10K is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * a power of 2. Therefore, we need a constant. Everything else can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * be calculated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) #define DCC_CFG_PORT_MTU_CAP_10240 7
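/*
 * Worked example (hedged, assuming the field follows the usual OPA MTU
 * enumeration): power-of-2 MTUs encode as ilog2(mtu) - 7, e.g.
 * 2048 -> 4, 4096 -> 5, 8192 -> 6; 10240 breaks that pattern, hence
 * the constant 7 above.
 */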
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * Table of the DC grouping of error interrupts. Each entry refers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * another register containing more information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* 3*/ /* dc_lbm_int - special, see is_dc_int() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /* the rest are reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct cntr_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * counter name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /*
* CSR offset to read for this counter (if applicable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) u64 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * offset into dd or ppd to store the counter's value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) u8 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /*
* accessor for the stat element; the context is either dd or ppd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) int mode, u64 data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) #define CNTR_ELEM(name, csr, offset, flags, accessor) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) csr, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) offset, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) flags, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) accessor \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
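/*
 * The positional initializer above fills struct cntr_entry in order:
 * name, csr, offset, flags, rw_cntr. E.g. a counter built with
 * RXE32_PORT_CNTR_ELEM below becomes
 *    { "name", counter * 8 + RCV_COUNTER_ARRAY32, 0,
 *      flags | CNTR_32BIT, port_access_u32_csr }
 */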
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* 32bit RXE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) (counter * 8 + RCV_COUNTER_ARRAY32), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 0, flags | CNTR_32BIT, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) port_access_u32_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) (counter * 8 + RCV_COUNTER_ARRAY32), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 0, flags | CNTR_32BIT, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) dev_access_u32_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /* 64bit RXE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) (counter * 8 + RCV_COUNTER_ARRAY64), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 0, flags, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) port_access_u64_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) (counter * 8 + RCV_COUNTER_ARRAY64), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 0, flags, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) dev_access_u64_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) #define OVR_ELM(ctx) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) CNTR_ELEM("RcvHdrOvr" #ctx, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) (RCV_HDR_OVFL_CNT + ctx * 0x100), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 0, CNTR_NORMAL, port_access_u64_csr)
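/*
 * For example, OVR_ELM(0) expands to
 *    CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100, 0,
 *              CNTR_NORMAL, port_access_u64_csr)
 * i.e. one RcvHdrOvfl counter per receive context, spaced 0x100 bytes
 * apart in CSR space.
 */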
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /* 32bit TXE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) (counter * 8 + SEND_COUNTER_ARRAY32), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 0, flags | CNTR_32BIT, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) port_access_u32_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* 64bit TXE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) (counter * 8 + SEND_COUNTER_ARRAY64), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 0, flags, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) port_access_u64_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
(counter * 8 + SEND_COUNTER_ARRAY64), \
0, flags, \
dev_access_u64_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* CCE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) (counter * 8 + CCE_COUNTER_ARRAY32), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 0, flags | CNTR_32BIT, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) dev_access_u32_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 0, flags | CNTR_32BIT, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) dev_access_u32_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* DC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) #define DC_PERF_CNTR(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) counter, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) flags, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) dev_access_u64_csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) #define DC_PERF_CNTR_LCB(name, counter, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) counter, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) flags, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) dc_access_lcb_cntr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /* ibp counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #define SW_IBP_CNTR(name, cntr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) CNTR_ELEM(#name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) CNTR_SYNTH, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) access_ibp_##cntr)
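
/*
 * Illustrative only (not part of the driver): the macros above expand
 * into cntr_entry initializers, pairing a printable name and a CSR
 * offset with the access routine that knows how to reach it.  The
 * entry indices and counter defines below are hypothetical stand-ins
 * for the real table entries built elsewhere in this file:
 *
 *	[C_EXAMPLE_TX] = TXE64_PORT_CNTR_ELEM(ExampleTx,
 *					      SEND_DATA_PKT_CNT,
 *					      CNTR_NORMAL),
 *	[C_EXAMPLE_DC] = DC_PERF_CNTR(ExampleDc,
 *				      DCC_PRF_PORT_XMIT_DATA_CNT,
 *				      CNTR_SYNTH),
 */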

/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}
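
/*
 * Illustrative sketch (hypothetical offsets): BAR0 is mapped in two
 * pieces split at dd->base2_start, so an offset below the split
 * resolves against kregbase1 and one at or above it against kregbase2:
 *
 *	// dd->kregbase1 + 0x100
 *	void __iomem *lo = hfi1_addr_from_offset(dd, 0x100);
 *	// dd->kregbase2 + 0x40
 *	void __iomem *hi = hfi1_addr_from_offset(dd, dd->base2_start + 0x40);
 */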

/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}
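
/*
 * Usage sketch (illustrative): callers read a 64-bit CSR by offset and
 * can treat an all-ones result as "no mapping", since -1 cast to u64
 * is ~0ull.  CCE_REVISION stands in for any CSR offset define:
 *
 *	u64 rev = read_csr(dd, CCE_REVISION);
 *
 *	if (rev == ~0ull)
 *		dd_dev_err(dd, "device not present\n");
 */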

/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
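
/*
 * Note (illustrative): the WARN_ON above keeps stray CSR writes out of
 * the RcvArray window, which has its own dedicated update path in the
 * driver.  A hypothetical sketch of the two cases:
 *
 *	write_csr(dd, SEND_CTRL, val);		   // ordinary CSR: written
 *	write_csr(dd, RCV_ARRAY + 8 * idx, entry); // warns and is dropped
 */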

/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
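
/*
 * Usage sketch (illustrative): resolving the address once is useful
 * for repeated access to the same CSR; the NULL return must be checked
 * since the device may be absent.  SEND_CTRL is a stand-in here:
 *
 *	void __iomem *addr = get_csr_addr(dd, SEND_CTRL);
 *
 *	if (addr)
 *		writeq(readq(addr) | SEND_CTRL_SEND_ENABLE_SMASK, addr);
 */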

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
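
/*
 * Illustrative: all CSR-backed counter accessors below funnel through
 * read_write_csr().  CNTR_MODE_R returns the current hardware value,
 * CNTR_MODE_W stores one (typically zero, to reset the counter):
 *
 *	u64 now = read_write_csr(dd, csr, CNTR_MODE_R, 0);
 *
 *	(void)read_write_csr(dd, csr, CNTR_MODE_W, 0);
 */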

/* Dev Access */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
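
/*
 * Illustrative: for CNTR_SDMA entries the per-engine copies of the CSR
 * sit 0x100 bytes apart, so the "vl" argument selects an SDMA engine
 * rather than a virtual lane.  With a hypothetical base of 0x2000:
 *
 *	engine 0 -> 0x2000, engine 1 -> 0x2100, engine 2 -> 0x2200, ...
 */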

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
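
/*
 * Note (illustrative): LCB CSRs are not always directly reachable, so
 * read_lcb_csr()/write_lcb_csr() can fail and this accessor then
 * reports the counter as 0 rather than waiting for the LCB:
 *
 *	u64 v = dc_access_lcb_cntr(entry, dd, CNTR_INVALID_VL,
 *				   CNTR_MODE_R, 0);
 *	// v == 0 for either a true zero count or an unacquirable LCB
 */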

/* Port Access */
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
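
/*
 * Illustrative: CNTR_VL entries are 64-bit CSR arrays indexed by
 * virtual lane, 8 bytes per VL.  For a hypothetical per-VL counter
 * with base offset 0x3000:
 *
 *	VL0 -> 0x3000, VL1 -> 0x3008, ..., VL7 -> 0x3038
 */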

/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
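
/*
 * Illustrative: software counters are plain u64 fields, so a write
 * stores the value directly, unlike the per-CPU counters further down
 * which only support zeroing:
 *
 *	u64 downs = read_write_sw(dd, &ppd->link_downed, CNTR_MODE_R, 0);
 *
 *	(void)read_write_sw(dd, &ppd->link_downed, CNTR_MODE_W, 0);
 */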

static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}

static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}

static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}

u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
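
/*
 * Illustrative: per-CPU counters are summed over all possible CPUs, so
 * contributions from CPUs that have since gone offline are retained:
 *
 *	u64 total_ints = get_all_cpu_total(dd->int_counter);
 */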

static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
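
/*
 * Illustrative: "zeroing" a per-CPU counter never touches the per-CPU
 * data; it snapshots the current total into *z_val, and later reads
 * report the delta since that snapshot:
 *
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);	// baseline
 *	// ...interrupts occur...
 *	u64 since = read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *				   CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */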

static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->ctx0_seq_drop;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}

/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
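
/*
 * Illustrative: the misc_err_status_cnt[] indices above correspond to
 * bit positions within MISC_ERR_STATUS (index 12 for the PLL lock fail
 * bit down to index 0 for CSR parity), so each accessor reports how
 * often one status bit has been observed.  The counts themselves are
 * maintained by the error-interrupt handling code elsewhere in the
 * driver.
 */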

/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}

/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl, int mode,
						u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static u64 access_cce_cli0_async_fifo_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return dd->cce_err_status_cnt[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return dd->cce_err_status_cnt[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) return dd->cce_err_status_cnt[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static u64 access_cce_trgt_async_fifo_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return dd->cce_err_status_cnt[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return dd->cce_err_status_cnt[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) return dd->cce_err_status_cnt[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) return dd->cce_err_status_cnt[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * Software counters corresponding to each of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * error status bits within RcvErrStatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) */
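
/*
 * Illustrative sketch only: every accessor in this file is written out
 * by hand, but each one follows a shape that could, in principle, be
 * generated by a macro such as the one below. RX_ERR_CNT_ACCESSOR() is
 * hypothetical and is not part of the driver; it is kept inside #if 0
 * so it documents the pattern without changing the build.
 */
#if 0
#define RX_ERR_CNT_ACCESSOR(name, idx)					\
static u64 access_##name##_cnt(const struct cntr_entry *entry,		\
			       void *context, int vl, int mode,		\
			       u64 data)				\
{									\
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;	\
									\
	return dd->rcv_err_status_cnt[idx];				\
}
#endif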
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) return dd->rcv_err_status_cnt[63];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) return dd->rcv_err_status_cnt[62];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) return dd->rcv_err_status_cnt[61];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return dd->rcv_err_status_cnt[60];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return dd->rcv_err_status_cnt[59];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return dd->rcv_err_status_cnt[58];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) return dd->rcv_err_status_cnt[57];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) return dd->rcv_err_status_cnt[56];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) return dd->rcv_err_status_cnt[55];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) return dd->rcv_err_status_cnt[54];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) return dd->rcv_err_status_cnt[53];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) return dd->rcv_err_status_cnt[52];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return dd->rcv_err_status_cnt[51];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return dd->rcv_err_status_cnt[50];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return dd->rcv_err_status_cnt[49];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) return dd->rcv_err_status_cnt[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return dd->rcv_err_status_cnt[47];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) return dd->rcv_err_status_cnt[46];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static u64 access_rx_hq_intr_csr_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) return dd->rcv_err_status_cnt[45];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) static u64 access_rx_lookup_csr_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) return dd->rcv_err_status_cnt[44];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) static u64 access_rx_lookup_rcv_array_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) return dd->rcv_err_status_cnt[43];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) static u64 access_rx_lookup_rcv_array_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return dd->rcv_err_status_cnt[42];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static u64 access_rx_lookup_des_part2_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) return dd->rcv_err_status_cnt[41];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return dd->rcv_err_status_cnt[40];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) static u64 access_rx_lookup_des_part1_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) return dd->rcv_err_status_cnt[39];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return dd->rcv_err_status_cnt[38];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) return dd->rcv_err_status_cnt[37];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) return dd->rcv_err_status_cnt[36];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) return dd->rcv_err_status_cnt[35];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) return dd->rcv_err_status_cnt[34];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) return dd->rcv_err_status_cnt[33];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) return dd->rcv_err_status_cnt[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) return dd->rcv_err_status_cnt[31];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) return dd->rcv_err_status_cnt[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) return dd->rcv_err_status_cnt[29];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) return dd->rcv_err_status_cnt[28];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) return dd->rcv_err_status_cnt[27];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) return dd->rcv_err_status_cnt[26];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) return dd->rcv_err_status_cnt[25];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) return dd->rcv_err_status_cnt[24];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) return dd->rcv_err_status_cnt[23];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return dd->rcv_err_status_cnt[22];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) return dd->rcv_err_status_cnt[21];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) return dd->rcv_err_status_cnt[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) return dd->rcv_err_status_cnt[19];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) return dd->rcv_err_status_cnt[18];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) return dd->rcv_err_status_cnt[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) return dd->rcv_err_status_cnt[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) return dd->rcv_err_status_cnt[15];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) return dd->rcv_err_status_cnt[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) return dd->rcv_err_status_cnt[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) return dd->rcv_err_status_cnt[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return dd->rcv_err_status_cnt[11];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) return dd->rcv_err_status_cnt[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return dd->rcv_err_status_cnt[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) return dd->rcv_err_status_cnt[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) return dd->rcv_err_status_cnt[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) return dd->rcv_err_status_cnt[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) return dd->rcv_err_status_cnt[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) return dd->rcv_err_status_cnt[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) return dd->rcv_err_status_cnt[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return dd->rcv_err_status_cnt[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) return dd->rcv_err_status_cnt[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return dd->rcv_err_status_cnt[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
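/*
 * The helpers above cover all 64 bits of RcvErrStatus, from
 * access_rx_csr_parity_err_cnt() at bit 63 down to
 * access_rx_dma_csr_cor_err_cnt() at bit 0.
 */
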
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) * Software counters corresponding to each of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) * error status bits within SendPioErrStatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) */
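
/*
 * Population side (schematic): the error interrupt handlers elsewhere
 * in this file read the error-status CSR and bump the software counter
 * for every bit found set, along the lines of:
 *
 *	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++)
 *		if (reg & (1ull << i))
 *			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
 *
 * This is a sketch of the pattern, not a quote of the handler; the
 * accessors below only ever read the resulting array.
 */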
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) static u64 access_pio_pec_sop_head_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) return dd->send_pio_err_status_cnt[35];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) static u64 access_pio_pcc_sop_head_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) return dd->send_pio_err_status_cnt[34];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) static u64 access_pio_last_returned_cnt_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) return dd->send_pio_err_status_cnt[33];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) static u64 access_pio_current_free_cnt_parity_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) return dd->send_pio_err_status_cnt[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) return dd->send_pio_err_status_cnt[31];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
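
/*
 * In each of the access_* helpers in this file, the array index read
 * below appears to mirror the bit position of the corresponding error
 * in the hardware error-status register, as the descending indices
 * (one per named error bit) suggest.
 */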

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
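
/*
 * A minimal sketch of how one of these helpers is presumably wired
 * into a counter table entry elsewhere in the driver; the CNTR_ELEM()
 * macro and the exact argument values here are illustrative
 * assumptions, not a quote of the actual table:
 *
 *	CNTR_ELEM("SDmaRpyTagErr", 0, 0, CNTR_NORMAL,
 *		  access_sdma_rpy_tag_err_cnt),
 */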

/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) return dd->send_egress_err_status_cnt[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
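/*
 * The open-coded accessors above could equally be stamped out by a
 * generator macro, in the same style as def_access_sw_cpu() and
 * def_access_ibp_counter() further down.  The macro below is an
 * illustrative sketch only: the name is hypothetical and nothing in
 * this file invokes it, so it expands to no code.  An invocation such
 * as def_access_egress_err_cnt(egress_reserved_9, 9) would reproduce
 * the corresponding function above.
 */
#define def_access_egress_err_cnt(cntr, idx)				\
static u64 access_##cntr##_err_cnt(const struct cntr_entry *entry,	\
				   void *context, int vl, int mode,	\
				   u64 data)				\
{									\
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;	\
									\
	return dd->send_egress_err_status_cnt[idx];			\
}
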
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) * Software counters corresponding to each of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) * error status bits within SendErrStatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) static u64 access_send_csr_write_bad_addr_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) return dd->send_err_status_cnt[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) return dd->send_err_status_cnt[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) return dd->send_err_status_cnt[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) * Software counters corresponding to each of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) * error status bits within SendCtxtErrStatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) static u64 access_pio_write_out_of_bounds_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) return dd->sw_ctxt_err_status_cnt[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) return dd->sw_ctxt_err_status_cnt[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) static u64 access_pio_write_crosses_boundary_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) return dd->sw_ctxt_err_status_cnt[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) return dd->sw_ctxt_err_status_cnt[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) return dd->sw_ctxt_err_status_cnt[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) * Software counters corresponding to each of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) * error status bits within SendDmaEngErrStatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) static u64 access_sdma_header_request_fifo_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) return dd->sw_send_dma_eng_err_status_cnt[23];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) static u64 access_sdma_header_storage_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) return dd->sw_send_dma_eng_err_status_cnt[22];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) static u64 access_sdma_packet_tracking_cor_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) return dd->sw_send_dma_eng_err_status_cnt[21];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) return dd->sw_send_dma_eng_err_status_cnt[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) return dd->sw_send_dma_eng_err_status_cnt[19];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) static u64 access_sdma_header_request_fifo_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) return dd->sw_send_dma_eng_err_status_cnt[18];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) static u64 access_sdma_header_storage_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) return dd->sw_send_dma_eng_err_status_cnt[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) static u64 access_sdma_packet_tracking_unc_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) return dd->sw_send_dma_eng_err_status_cnt[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) return dd->sw_send_dma_eng_err_status_cnt[15];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) return dd->sw_send_dma_eng_err_status_cnt[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) return dd->sw_send_dma_eng_err_status_cnt[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) return dd->sw_send_dma_eng_err_status_cnt[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) return dd->sw_send_dma_eng_err_status_cnt[11];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) return dd->sw_send_dma_eng_err_status_cnt[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) return dd->sw_send_dma_eng_err_status_cnt[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) static u64 access_sdma_packet_desc_overflow_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) return dd->sw_send_dma_eng_err_status_cnt[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) void *context, int vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) return dd->sw_send_dma_eng_err_status_cnt[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) return dd->sw_send_dma_eng_err_status_cnt[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) return dd->sw_send_dma_eng_err_status_cnt[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) return dd->sw_send_dma_eng_err_status_cnt[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) static u64 access_sdma_tail_out_of_bounds_err_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) void *context, int vl, int mode, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) return dd->sw_send_dma_eng_err_status_cnt[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) return dd->sw_send_dma_eng_err_status_cnt[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) return dd->sw_send_dma_eng_err_status_cnt[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) return dd->sw_send_dma_eng_err_status_cnt[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) void *context, int vl, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) u64 csr = entry->csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) val = read_write_csr(dd, csr, mode, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) if (mode == CNTR_MODE_R) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) } else if (mode == CNTR_MODE_W) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) dd->sw_rcv_bypass_packet_errors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 		dd_dev_err(dd, "Invalid cntr register access mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090)
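/*
 * access_dc_rcv_err_cnt() above folds the software-tracked bypass
 * packet errors into the DCC_ERR_PORTRCV_ERR_CNT value with a
 * saturating add, clamping at CNTR_MAX instead of letting the sum
 * wrap.  The same idiom, isolated as a standalone helper -- the helper
 * name is hypothetical; the driver open-codes the test rather than
 * calling anything like this:
 */
static inline u64 cntr_saturating_add(u64 val, u64 extra)
{
	/* if val + extra would exceed CNTR_MAX, pin the result there */
	if (val > CNTR_MAX - extra)
		return CNTR_MAX;
	return val + extra;
}
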
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) #define def_access_sw_cpu(cntr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) void *context, int vl, int mode, u64 data) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) ppd->ibport_data.rvp.cntr, vl, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) mode, data); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) def_access_sw_cpu(rc_acks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) def_access_sw_cpu(rc_qacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) def_access_sw_cpu(rc_delayed_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104)
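/*
 * For reference, def_access_sw_cpu(rc_acks) above expands to
 * access_sw_cpu_rc_acks(), which hands read_write_cpu() the per-port
 * zero baseline (ppd->ibport_data.rvp.z_rc_acks) together with the
 * live per-CPU counter (ppd->ibport_data.rvp.rc_acks); in effect a
 * read reports the total accumulated since the counter was last
 * zeroed.
 */
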
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) #define def_access_ibp_counter(cntr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) void *context, int vl, int mode, u64 data) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) if (vl != CNTR_INVALID_VL) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) return 0; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) mode, data); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
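/*
 * Unlike the per-CPU counters above, these IB-protocol counters are
 * plain per-port fields (rvp.n_<cntr>) with no per-VL breakdown,
 * which is why the macro returns 0 for any vl other than
 * CNTR_INVALID_VL before deferring to read_write_sw().
 */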
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) def_access_ibp_counter(loop_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) def_access_ibp_counter(rc_resends);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) def_access_ibp_counter(rnr_naks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) def_access_ibp_counter(other_naks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) def_access_ibp_counter(rc_timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) def_access_ibp_counter(pkt_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) def_access_ibp_counter(dmawait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) def_access_ibp_counter(rc_seqnak);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) def_access_ibp_counter(rc_dupreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) def_access_ibp_counter(rdma_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) def_access_ibp_counter(unaligned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) def_access_ibp_counter(seq_naks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) def_access_ibp_counter(rc_crwaits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131)
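/*
 * dev_cntrs[] is indexed by the C_* device counter enum through
 * designated initializers, so entries need not appear in enum order.
 * Reading the CNTR_ELEM() uses below, each entry carries the counter's
 * user-visible name, a CSR address (0 when unused), an offset, a flags
 * word (CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, ...), and the accessor
 * callback invoked by the counter core; the RXE32/CCE/DC helper macros
 * are, by all appearances, CNTR_ELEM wrappers that fill in the
 * appropriate CSR base and flags.
 */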
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) [C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) RCV_TID_FLOW_GEN_MISMATCH_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) access_dc_rcv_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) [C_DC_TOTAL_CRC] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) [C_DC_CRC_MULT_LN] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) [C_DC_SEQ_CRC_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) [C_DC_ESC0_ONLY_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) [C_DC_ESC0_PLUS1_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) [C_DC_ESC0_PLUS2_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) [C_DC_REINIT_FROM_PEER_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) [C_DC_MISC_FLG_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) [C_DC_PRF_GOOD_LTP_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) [C_DC_PRF_ACCEPTED_LTP_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) [C_DC_PRF_RX_FLIT_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) [C_DC_PRF_TX_FLIT_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) [C_DC_PRF_CLK_CNTR] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) [C_DC_PG_STS_TX_SBE_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) [C_DC_PG_STS_TX_MBE_CNT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) access_sw_cpu_intr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) access_sw_cpu_rcv_limit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) access_sw_ctx0_seq_drop),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) access_sw_vtx_wait),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) access_sw_pio_wait),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) access_sw_pio_drain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) access_sw_kmem_wait),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) hfi1_access_sw_tid_wait),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) access_sw_send_schedule),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) SEND_DMA_DESC_FETCHED_CNT, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) dev_access_u32_csr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) access_sde_int_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) access_sde_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) access_sde_idle_int_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) access_sde_progress_int_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) /* MISC_ERR_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) access_misc_pll_lock_fail_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) access_misc_mbist_fail_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) access_misc_invalid_eep_cmd_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) access_misc_efuse_done_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) access_misc_efuse_write_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) access_misc_efuse_read_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) access_misc_efuse_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) access_misc_fw_auth_failed_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) access_misc_key_mismatch_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) access_misc_sbus_write_failed_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) access_misc_csr_write_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) access_misc_csr_read_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) access_misc_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) /* CceErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) access_sw_cce_err_status_aggregated_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) access_cce_msix_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) access_cce_int_map_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) access_cce_int_map_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) access_cce_msix_table_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) access_cce_msix_table_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) access_cce_rxdma_conv_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) access_cce_rcpl_async_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) access_cce_seg_write_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) access_cce_seg_read_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) access_la_triggered_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) access_cce_trgt_cpl_timeout_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) access_pcic_receive_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) access_pcic_transmit_back_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) access_pcic_transmit_front_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) access_pcic_cpl_dat_q_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) access_pcic_cpl_hd_q_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) access_pcic_post_dat_q_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) access_pcic_post_hd_q_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) access_pcic_retry_sot_mem_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) access_pcic_retry_mem_unc_err),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) access_pcic_n_post_dat_q_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) access_pcic_n_post_h_q_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) access_pcic_cpl_dat_q_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) access_pcic_cpl_hd_q_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) access_pcic_post_dat_q_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) access_pcic_post_hd_q_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) access_pcic_retry_sot_mem_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) access_pcic_retry_mem_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) "CceCli1AsyncFifoDbgParityError", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) access_cce_cli1_async_fifo_dbg_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) "CceCli1AsyncFifoRxdmaParityError", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) access_cce_cli2_async_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) access_cce_csr_cfg_bus_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) access_cce_cli0_async_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) access_cce_rspd_data_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) access_cce_trgt_access_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) access_cce_trgt_async_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) access_cce_csr_write_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) access_cce_csr_read_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) access_ccs_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481)
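/*
 * Each *ErrStatus group below appears to shadow its error status CSR
 * bit for bit: note the explicit "Reserved" placeholder entries (for
 * example "Pio Reserved 31"), which keep the counter indices aligned
 * with the hardware bit positions.
 */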
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) /* RcvErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) access_rx_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) access_rx_csr_write_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) access_rx_csr_read_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) access_rx_dma_csr_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) access_rx_dma_dq_fsm_encoding_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) access_rx_dma_eq_fsm_encoding_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) access_rx_dma_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) access_rx_rbuf_data_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) access_rx_rbuf_data_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) access_rx_dma_data_fifo_rd_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) access_rx_dma_data_fifo_rd_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) access_rx_dma_hdr_fifo_rd_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) access_rx_dma_hdr_fifo_rd_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) access_rx_rbuf_desc_part2_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) access_rx_rbuf_desc_part2_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) access_rx_rbuf_desc_part1_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) access_rx_rbuf_desc_part1_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) access_rx_hq_intr_fsm_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) access_rx_hq_intr_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) access_rx_lookup_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) access_rx_lookup_rcv_array_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) access_rx_lookup_rcv_array_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) access_rx_lookup_des_part2_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) access_rx_lookup_des_part1_unc_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) access_rx_lookup_des_part1_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) access_rx_rbuf_next_free_buf_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) access_rx_rbuf_next_free_buf_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) "RxRbufFlInitWrAddrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) access_rbuf_fl_init_wr_addr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) access_rx_rbuf_fl_initdone_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) access_rx_rbuf_fl_write_addr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) access_rx_rbuf_fl_rd_addr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) access_rx_rbuf_empty_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) access_rx_rbuf_full_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) access_rbuf_bad_lookup_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) access_rbuf_ctx_id_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) access_rbuf_csr_qeopdw_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) "RxRbufCsrQNumOfPktParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) "RxRbufCsrQTlPtrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) "RxRbufCsrQHeadBufNumParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) access_rx_rbuf_block_list_read_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) access_rx_rbuf_block_list_read_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) access_rx_rbuf_lookup_des_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) access_rx_rbuf_lookup_des_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) "RxRbufLookupDesRegUncCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) access_rx_rbuf_lookup_des_reg_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) access_rx_rbuf_free_list_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) access_rx_rbuf_free_list_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) access_rx_rcv_fsm_encoding_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) access_rx_dma_flag_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) access_rx_dma_flag_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) access_rx_dc_sop_eop_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) access_rx_rcv_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) access_rx_rcv_qp_map_table_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) access_rx_rcv_qp_map_table_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) access_rx_rcv_data_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) access_rx_rcv_data_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) access_rx_rcv_hdr_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) access_rx_rcv_hdr_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) access_rx_dc_intf_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) access_rx_dma_csr_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) /* SendPioErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) access_pio_pec_sop_head_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) access_pio_pcc_sop_head_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) access_pio_last_returned_cnt_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) access_pio_current_free_cnt_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) access_pio_reserved_31_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) access_pio_reserved_30_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) access_pio_ppmc_sop_len_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) access_pio_ppmc_bqc_mem_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) access_pio_vl_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) access_pio_vlf_sop_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) access_pio_vlf_v1_len_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) access_pio_block_qw_count_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) access_pio_write_qw_valid_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) access_pio_state_machine_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) access_pio_write_data_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) access_pio_host_addr_mem_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) access_pio_host_addr_mem_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) access_pio_init_sm_in_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) access_pio_ppmc_pbl_fifo_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) access_pio_credit_ret_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) access_pio_v1_len_mem_bank1_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) access_pio_v1_len_mem_bank0_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) access_pio_v1_len_mem_bank1_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) access_pio_v1_len_mem_bank0_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) access_pio_sm_pkt_reset_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) access_pio_pkt_evict_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) "PioSbrdctrlCrrelFifoParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) access_pio_sbrdctl_crrel_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) access_pio_pec_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) access_pio_pcc_fifo_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) access_pio_sb_mem_fifo1_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) access_pio_sb_mem_fifo0_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) access_pio_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) access_pio_write_addr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) access_pio_write_bad_ctxt_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) /* SendDmaErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) access_sdma_pcie_req_tracking_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) access_sdma_pcie_req_tracking_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) access_sdma_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) access_sdma_rpy_tag_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) /* SendEgressErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) access_tx_read_pio_memory_csr_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) access_tx_read_sdma_memory_csr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) access_tx_egress_fifo_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) access_tx_read_pio_memory_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) access_tx_read_sdma_memory_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) access_tx_sb_hdr_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) access_tx_credit_overrun_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) access_tx_launch_fifo8_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) access_tx_launch_fifo7_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) access_tx_launch_fifo6_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) access_tx_launch_fifo5_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) access_tx_launch_fifo4_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) access_tx_launch_fifo3_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) access_tx_launch_fifo2_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) access_tx_launch_fifo1_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) access_tx_launch_fifo0_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) access_tx_credit_return_vl_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) access_tx_hcrc_insertion_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) access_tx_egress_fifo_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) access_tx_read_pio_memory_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) access_tx_read_sdma_memory_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) access_tx_sb_hdr_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) access_tx_credit_return_partiy_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) access_tx_launch_fifo8_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) access_tx_launch_fifo7_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) access_tx_launch_fifo6_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) access_tx_launch_fifo5_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) access_tx_launch_fifo4_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) access_tx_launch_fifo3_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) access_tx_launch_fifo2_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) access_tx_launch_fifo1_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) access_tx_launch_fifo0_unc_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) access_tx_sdma15_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) access_tx_sdma14_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) access_tx_sdma13_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) access_tx_sdma12_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) access_tx_sdma11_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) access_tx_sdma10_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) access_tx_sdma9_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) access_tx_sdma8_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) access_tx_sdma7_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) access_tx_sdma6_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) access_tx_sdma5_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) access_tx_sdma4_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) access_tx_sdma3_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) access_tx_sdma2_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) access_tx_sdma1_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) access_tx_sdma0_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) access_tx_config_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) access_tx_sbrd_ctl_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) access_tx_launch_csr_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) access_tx_illegal_vl_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) "TxSbrdCtlStateMachineParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) access_tx_sbrd_ctl_state_machine_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) access_egress_reserved_10_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) access_egress_reserved_9_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) access_tx_sdma_launch_intf_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) access_tx_pio_launch_intf_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) access_egress_reserved_6_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) access_tx_incorrect_link_state_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) access_tx_linkdown_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) "EgressFifoUnderrunOrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) access_tx_egress_fifi_underrun_or_parity_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) access_egress_reserved_2_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) access_tx_pkt_integrity_mem_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) access_tx_pkt_integrity_mem_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) /* SendErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) access_send_csr_write_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) access_send_csr_read_bad_addr_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) access_send_csr_parity_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) /* SendCtxtErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) access_pio_write_out_of_bounds_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) access_pio_write_overflow_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) access_pio_write_crosses_boundary_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) access_pio_disallowed_packet_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) access_pio_inconsistent_sop_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) /* SendDmaEngErrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) access_sdma_header_request_fifo_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) access_sdma_header_storage_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) access_sdma_packet_tracking_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) access_sdma_assembly_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) access_sdma_desc_table_cor_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) access_sdma_header_request_fifo_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) access_sdma_header_storage_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) access_sdma_packet_tracking_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) access_sdma_assembly_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) access_sdma_desc_table_unc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) access_sdma_timeout_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) access_sdma_header_length_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) access_sdma_header_address_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) access_sdma_header_select_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) access_sdma_reserved_9_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) access_sdma_packet_desc_overflow_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) access_sdma_length_mismatch_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) access_sdma_halt_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) access_sdma_mem_read_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) access_sdma_first_desc_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) access_sdma_tail_out_of_bounds_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) access_sdma_too_long_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) access_sdma_gen_mismatch_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) access_sdma_wrong_dw_err_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) CNTR_SYNTH | CNTR_VL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) access_sw_link_dn_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) access_sw_link_up_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) access_sw_unknown_frame_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) access_sw_xmit_discards),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) access_sw_xmit_discards),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) access_xmit_constraint_errs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) access_rcv_constraint_errs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) access_sw_cpu_rc_acks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) access_sw_cpu_rc_qacks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) access_sw_cpu_rc_delayed_comp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) /* ======================================================================== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) /* return true if this is chip revision revision a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) int is_ax(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) u8 chip_rev_minor =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) & CCE_REVISION_CHIP_REV_MINOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) return (chip_rev_minor & 0xf0) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) /* return true if this is chip revision revision b */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) int is_bx(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) u8 chip_rev_minor =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) & CCE_REVISION_CHIP_REV_MINOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) return (chip_rev_minor & 0xF0) == 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) /* return true is kernel urg disabled for rcd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) bool is_urg_masked(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) u32 is = IS_RCVURGENT_START + rcd->ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) u8 bit = is % 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267)
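	/* each 64-bit CCE_INT_MASK CSR covers 64 interrupt sources */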
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) return !(mask & BIT_ULL(bit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) * Append string s to buffer buf. Arguments curp and len are the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) * position and remaining length, respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) * return 0 on success, 1 on out of room
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) static int append_str(char *buf, char **curp, int *lenp, const char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) char *p = *curp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) int len = *lenp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) int result = 0; /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) char c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) /* add a comma, if first in the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) if (p != buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) result = 1; /* out of room */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) *p++ = ',';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) /* copy the string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) while ((c = *s++) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) result = 1; /* out of room */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) *p++ = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) /* write return values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) *curp = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) *lenp = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) * Using the given flag table, print a comma separated string into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) * the buffer. End in '*' if the buffer is too short.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) static char *flag_string(char *buf, int buf_len, u64 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) struct flag_table *table, int table_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) char extra[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) char *p = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) int len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) int no_room = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) /* make sure there is at least 2 so we can form "*" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) if (len < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) return "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) len--; /* leave room for a nul */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) for (i = 0; i < table_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) if (flags & table[i].flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) no_room = append_str(buf, &p, &len, table[i].str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) if (no_room)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) flags &= ~table[i].flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) /* any undocumented bits left? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) if (!no_room && flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) no_room = append_str(buf, &p, &len, extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) /* add * if ran out of room */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) if (no_room) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) /* may need to back up to add space for a '*' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) --p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) *p++ = '*';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) /* add final nul - space already allocated above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) *p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) /* first 8 CCE error interrupt source names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) static const char * const cce_misc_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) "CceErrInt", /* 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) "RxeErrInt", /* 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) "MiscErrInt", /* 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) "Reserved3", /* 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) "PioErrInt", /* 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) "SDmaErrInt", /* 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) "EgressErrInt", /* 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) "TxeErrInt" /* 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) * Return the miscellaneous error interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) if (source < ARRAY_SIZE(cce_misc_names))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) strncpy(buf, cce_misc_names[source], bsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) snprintf(buf, bsize, "Reserved%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) source + IS_GENERAL_ERR_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) * Return the SDMA engine error interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) snprintf(buf, bsize, "SDmaEngErrInt%u", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) * Return the send context error interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) snprintf(buf, bsize, "SendCtxtErrInt%u", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) static const char * const various_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) "PbcInt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) "GpioAssertInt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) "Qsfp1Int",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) "Qsfp2Int",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) "TCritInt"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) * Return the various interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) static char *is_various_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) if (source < ARRAY_SIZE(various_names))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) strncpy(buf, various_names[source], bsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) * Return the DC interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) static const char * const dc_int_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) "common",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) "lcb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) "8051",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) "lbm" /* local block merge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) if (source < ARRAY_SIZE(dc_int_names))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) snprintf(buf, bsize, "DCInt%u", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) static const char * const sdma_int_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) "SDmaInt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) "SdmaIdleInt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) "SdmaProgressInt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) * Return the SDMA engine interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) /* what interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) unsigned int what = source / TXE_NUM_SDMA_ENGINES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) /* which engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) unsigned int which = source % TXE_NUM_SDMA_ENGINES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) if (likely(what < 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) * Return the receive available interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) snprintf(buf, bsize, "RcvAvailInt%u", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) * Return the receive urgent interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) snprintf(buf, bsize, "RcvUrgentInt%u", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) * Return the send credit interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) snprintf(buf, bsize, "SendCreditInt%u", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) * Return the reserved interrupt name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) cce_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) ARRAY_SIZE(cce_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) rxe_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) ARRAY_SIZE(rxe_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) return flag_string(buf, buf_len, flags, misc_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) ARRAY_SIZE(misc_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) pio_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) ARRAY_SIZE(pio_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) sdma_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) ARRAY_SIZE(sdma_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) egress_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) ARRAY_SIZE(egress_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) egress_err_info_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) ARRAY_SIZE(egress_err_info_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) static char *send_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) send_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) ARRAY_SIZE(send_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) * For most these errors, there is nothing that can be done except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) * report or record it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) dd_dev_info(dd, "CCE Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) cce_err_status_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) /* this error requires a manual drop into SPC freeze mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) /* then a fix up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) start_freeze_handling(dd->pport, FREEZE_SELF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) if (reg & (1ull << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) incr_cntr64(&dd->cce_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) /* maintain a counter over all cce_err_status errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) incr_cntr64(&dd->sw_cce_err_status_aggregate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) * Check counters for receive errors that do not have an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) * associated with them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) #define RCVERR_CHECK_TIME 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) static void update_rcverr_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) set_link_down_reason(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) queue_work(ppd->link_wq, &ppd->link_bounce_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) static int init_rcverr(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) /* Assume the hardware counter has been reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) dd->rcv_ovfl_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) static void free_rcverr(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) if (dd->rcverr_timer.function)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) del_timer_sync(&dd->rcverr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) dd_dev_info(dd, "Receive Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) rxe_err_status_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) if (reg & ALL_RXE_FREEZE_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) * Freeze mode recovery is disabled for the errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) * in RXE_FREEZE_ABORT_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) flags = FREEZE_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) start_freeze_handling(dd->pport, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) if (reg & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) incr_cntr64(&dd->rcv_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) dd_dev_info(dd, "Misc Error: %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) misc_err_status_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) if (reg & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) incr_cntr64(&dd->misc_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) dd_dev_info(dd, "PIO Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) pio_err_status_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) if (reg & ALL_PIO_FREEZE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) start_freeze_handling(dd->pport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) if (reg & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) incr_cntr64(&dd->send_pio_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) dd_dev_info(dd, "SDMA Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) sdma_err_status_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) if (reg & ALL_SDMA_FREEZE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) start_freeze_handling(dd->pport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) if (reg & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) incr_cntr64(&dd->send_dma_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) static inline void __count_port_discards(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) incr_cntr64(&ppd->port_xmit_discards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) static void count_port_inactive(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) __count_port_discards(dd->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) * We have had a "disallowed packet" error during egress. Determine the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) * integrity check which failed, and update relevant error counter, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) * Note that the SEND_EGRESS_ERR_INFO register has only a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) * bit of state per integrity check, and so we can miss the reason for an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) * egress error if more than one packet fails the same integrity check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) static void handle_send_egress_err_info(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) int vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) /* clear down all observed info as quickly as possible after read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) write_csr(dd, SEND_EGRESS_ERR_INFO, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) info, egress_err_info_string(buf, sizeof(buf), info), src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) /* Eventually add other counters for each bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) if (info & PORT_DISCARD_EGRESS_ERRS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) int weight, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) * Count all applicable bits as individual errors and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) * attribute them to the packet that triggered this handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) * This may not be completely accurate due to limitations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) * on the available hardware error information. There is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) * a single information register and any number of error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) * packets may have occurred and contributed to it before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) * this routine is called. This means that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) * a) If multiple packets with the same error occur before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) * this routine is called, earlier packets are missed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) * There is only a single bit for each error type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) * b) Errors may not be attributed to the correct VL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) * The driver is attributing all bits in the info register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) * to the packet that triggered this call, but bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) * could be an accumulation of different packets with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) * different VLs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) * c) A single error packet may have multiple counts attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) * to it. There is no way for the driver to know if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) * multiple bits set in the info register are due to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) * single packet or multiple packets. The driver assumes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) * multiple packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) for (i = 0; i < weight; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) __count_port_discards(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) if (vl >= 0 && vl < TXE_NUM_DATA_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) else if (vl == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) incr_cntr64(&ppd->port_xmit_discards_vl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) [C_VL_15]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) * register. Does it represent a 'port inactive' error?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) static inline int port_inactive_err(u64 posn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) return (posn >= SEES(TX_LINKDOWN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) posn <= SEES(TX_INCORRECT_LINK_STATE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) * register. Does it represent a 'disallowed packet' error?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) static inline int disallowed_pkt_err(int posn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) * Input value is a bit position of one of the SDMA engine disallowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) * packet errors. Return which engine. Use of this must be guarded by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) * disallowed_pkt_err().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) static inline int disallowed_pkt_engine(int posn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) * Translate an SDMA engine to a VL. Return -1 if the tranlation cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) * be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) static int engine_to_vl(struct hfi1_devdata *dd, int engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) struct sdma_vl_map *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) int vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) /* range check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) m = rcu_dereference(dd->sdma_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) vl = m->engine_to_vl[engine];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) return vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) /*
 * Translate the send context (software index) into a VL. Return -1 if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) * translation cannot be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) struct send_context_info *sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) struct send_context *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) sci = &dd->send_contexts[sw_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) /* there is no information for user (PSM) and ack contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) sc = sci->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) if (!sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) if (dd->vld[15].sc == sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) return 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) for (i = 0; i < num_vls; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) if (dd->vld[i].sc == sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) u64 reg_copy = reg, handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) start_freeze_handling(dd->pport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) else if (is_ax(dd) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) start_freeze_handling(dd->pport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) while (reg_copy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset; we want it zero-based */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) int shift = posn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) u64 mask = 1ULL << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) if (port_inactive_err(shift)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) count_port_inactive(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) handled |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) } else if (disallowed_pkt_err(shift)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) handle_send_egress_err_info(dd, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) handled |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) reg_copy &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) reg &= ~handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) if (reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) dd_dev_info(dd, "Egress Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) egress_err_status_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) if (reg & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) incr_cntr64(&dd->send_egress_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) }
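
/*
 * Example walk of the classification loop above (values are
 * illustrative): for reg = (1ull << 40) | (1ull << 3), fls64() yields
 * 41 first, so bit 40 is classified and cleared from the working copy,
 * then it yields 4 for bit 3.  Bits that are neither 'port inactive'
 * nor 'disallowed packet' errors remain set in 'reg' and are logged
 * and counted before the routine returns.
 */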
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) dd_dev_info(dd, "Send Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) send_err_status_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) if (reg & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) incr_cntr64(&dd->send_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) * The maximum number of times the error clear down will loop before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) * blocking a repeating error. This value is arbitrary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) #define MAX_CLEAR_COUNT 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) * Clear and handle an error register. All error interrupts are funneled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) * through here to have a central location to correctly handle single-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) * or multi-shot errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) * For non per-context registers, call this routine with a context value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) * of 0 so the per-context offset is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) * If the handler loops too many times, assume that something is wrong
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) * and can't be fixed, so mask the error bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) static void interrupt_clear_down(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) u32 context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) const struct err_reg_info *eri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) /* read in a loop until no more errors are seen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) reg = read_kctxt_csr(dd, context, eri->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) if (reg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) write_kctxt_csr(dd, context, eri->clear, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) if (likely(eri->handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) eri->handler(dd, context, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) if (count > MAX_CLEAR_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) eri->desc, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) * Read-modify-write so any other masked bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) * remain masked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) mask = read_kctxt_csr(dd, context, eri->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) mask &= ~reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) write_kctxt_csr(dd, context, eri->mask, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) }
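
/*
 * A minimal sketch of an err_reg_info entry as consumed by
 * interrupt_clear_down() above.  The field names (status, clear, mask,
 * handler, desc) mirror their uses in that routine; the CSR names and
 * this particular wiring are hypothetical -- the real tables are
 * defined elsewhere in this file.
 *
 *	static const struct err_reg_info example_eri = {
 *		.status  = EXAMPLE_ERR_STATUS,	// status CSR, read in a loop
 *		.clear   = EXAMPLE_ERR_CLEAR,	// write-1-to-clear CSR
 *		.mask    = EXAMPLE_ERR_MASK,	// repeat offenders masked here
 *		.handler = handle_txe_err,	// called with the raw bits
 *		.desc    = "ExampleErr",
 *	};
 */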
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) * CCE block "misc" interrupt. Source is < 16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) const struct err_reg_info *eri = &misc_errs[source];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) if (eri->handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) interrupt_clear_down(dd, 0, eri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) return flag_string(buf, buf_len, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) sc_err_status_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) ARRAY_SIZE(sc_err_status_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) * Send context error interrupt. Source (hw_context) is < 160.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) * All send context errors cause the send context to halt. The normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) * clear-down mechanism cannot be used because we cannot clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) * error bits until several other long-running items are done first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) * This is OK because with the context halted, nothing else is going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) * to happen on it anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) static void is_sendctxt_err_int(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) unsigned int hw_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) struct send_context_info *sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) struct send_context *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) char flags[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) u64 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) u32 sw_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) sw_index = dd->hw_to_sw[hw_context];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) if (sw_index >= dd->num_send_contexts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) "out of range sw index %u for send context %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) sw_index, hw_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) sci = &dd->send_contexts[sw_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) spin_lock_irqsave(&dd->sc_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) sc = sci->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) sw_index, hw_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) /* tell the software that a halt has begun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) sc_stop(sc, SCF_HALTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) send_context_err_status_string(flags, sizeof(flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) * Automatically restart halted kernel contexts out of interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) * context. User contexts must ask the driver to restart the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) if (sc->type != SC_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) queue_work(dd->pport->hfi1_wq, &sc->halt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) * Update the counters for the corresponding status bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) * Note that these particular counters are aggregated over all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) * 160 contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) if (status & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) static void handle_sdma_eng_err(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) unsigned int source, u64 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) struct sdma_engine *sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) sde = &dd->per_sdma[source];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) sde->this_idx, source, (unsigned long long)status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) sde->err_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) sdma_engine_error(sde, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) * Update the counters for the corresponding status bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) * Note that these particular counters are aggregated over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) * all 16 DMA engines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) if (status & (1ull << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) * CCE block SDMA error interrupt. Source is < 16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) struct sdma_engine *sde = &dd->per_sdma[source];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) sdma_dumpstate(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) interrupt_clear_down(dd, source, &sdma_eng_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) * CCE block "various" interrupt. Source is < 8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) const struct err_reg_info *eri = &various_err[source];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) * TCritInt cannot go through interrupt_clear_down()
	 * because it is not a second-tier interrupt. The handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) * should be called directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) if (source == TCRIT_INT_SOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) handle_temp_err(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) else if (eri->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) interrupt_clear_down(dd, 0, eri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) "%s: Unimplemented/reserved interrupt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) __func__, source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) /* src_ctx is always zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) if (reg & QSFP_HFI0_MODPRST_N) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) if (!qsfp_mod_present(ppd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) dd_dev_info(dd, "%s: QSFP module removed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) ppd->driver_link_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) * Cable removed, reset all our information about the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) * cache and cable capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) * We don't set cache_refresh_required here as we expect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) * an interrupt when a cable is inserted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) ppd->qsfp_info.cache_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) ppd->qsfp_info.reset_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) ppd->qsfp_info.limiting_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) /* Invert the ModPresent pin now to detect plug-in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) ASIC_QSFP1_INVERT, qsfp_int_mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) if ((ppd->offline_disabled_reason >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) HFI1_ODR_MASK(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) (ppd->offline_disabled_reason ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) ppd->offline_disabled_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) HFI1_ODR_MASK(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) if (ppd->host_link_state == HLS_DN_POLL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) * The link is still in POLL. This means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) * that the normal link down processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) * will not happen. We have to do it here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) * before turning the DC off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) queue_work(ppd->link_wq, &ppd->link_down_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) dd_dev_info(dd, "%s: QSFP module inserted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) ppd->qsfp_info.cache_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) ppd->qsfp_info.cache_refresh_required = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) * Stop inversion of ModPresent pin to detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) * removal of the cable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) ASIC_QSFP1_INVERT, qsfp_int_mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) ppd->offline_disabled_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) if (reg & QSFP_HFI0_INT_N) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) ppd->qsfp_info.check_interrupt_flags = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) /* Schedule the QSFP work only if there is a cable attached. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) if (qsfp_mod_present(ppd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) }
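
/*
 * Note on the ModPresent handling above: writing the QSFP INVERT CSR
 * flips the sense of the ModPresent pin, so the next transition (a
 * plug-in after a removal, or a removal after a plug-in) raises a
 * fresh interrupt and re-enters this handler.
 */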
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) static int request_host_lcb_access(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) ret = do_8051_command(dd, HCMD_MISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) LOAD_DATA_FIELD_ID_SHIFT, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) dd_dev_err(dd, "%s: command failed with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) return ret == HCMD_SUCCESS ? 0 : -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) static int request_8051_lcb_access(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) ret = do_8051_command(dd, HCMD_MISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) LOAD_DATA_FIELD_ID_SHIFT, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) dd_dev_err(dd, "%s: command failed with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) return ret == HCMD_SUCCESS ? 0 : -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) * Set the LCB selector - allow host access. The DCC selector always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) * points to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) static inline void set_host_lcb_access(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) * Clear the LCB selector - allow 8051 access. The DCC selector always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) * points to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) * Acquire LCB access from the 8051. If the host already has access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) * just increment a counter. Otherwise, inform the 8051 that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) * host is taking access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) * 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) * -EBUSY if the 8051 has control and cannot be disturbed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) * -errno if unable to acquire access from the 8051
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) * Use the host link state lock so the operation of this routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) * { link state check, selector change, count increment } can occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) * as a unit against a link state change. Otherwise there is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) * race between the state change and the count increment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) if (sleep_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) mutex_lock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) while (!mutex_trylock(&ppd->hls_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) /* this access is valid only when the link is up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) if (ppd->host_link_state & HLS_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) dd_dev_info(dd, "%s: link state %s not up\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) __func__, link_state_name(ppd->host_link_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) if (dd->lcb_access_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) ret = request_host_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) "%s: unable to acquire LCB access, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) set_host_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) dd->lcb_access_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) mutex_unlock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) * Release LCB access by decrementing the use count. If the count is moving
 * from 1 to 0, inform the 8051 that it has control back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) * 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) * -errno if unable to release access to the 8051
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) * Use the host link state lock because the acquire needed it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) * Here, we only need to keep { selector change, count decrement }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) * as a unit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) if (sleep_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) mutex_lock(&dd->pport->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) while (!mutex_trylock(&dd->pport->hls_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) if (dd->lcb_access_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) if (dd->lcb_access_count == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) set_8051_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) ret = request_8051_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) "%s: unable to release LCB access, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) /* restore host access if the grant didn't work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) set_host_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) dd->lcb_access_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) mutex_unlock(&dd->pport->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) }
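
/*
 * Typical usage of the pair above (illustrative), from a context that
 * is allowed to sleep:
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		... read/write LCB CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Callers that cannot sleep pass sleep_ok = 0, in which case the
 * hls_lock is acquired by spinning on mutex_trylock().
 */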
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) * Initialize LCB access variables and state. Called during driver load,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) * after most of the initialization is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) * The DC default is LCB access on for the host. The driver defaults to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) * leaving access to the 8051. Assign access now - this constrains the call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) * to this routine to be after all LCB set-up is done. In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) static void init_lcb_access(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) dd->lcb_access_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) /*
 * Write a response back to an 8051 request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) (u64)return_code <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) * Handle host requests from the 8051.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) static void handle_8051_request(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) u16 data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) return; /* no request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) /* zero out COMPLETED so the response is seen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) /* extract request details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) case HREQ_LOAD_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) case HREQ_SAVE_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) case HREQ_READ_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) case HREQ_SET_TX_EQ_ABS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) case HREQ_SET_TX_EQ_REL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) case HREQ_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) case HREQ_LCB_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) /* Put the LCB, RX FPE and TX FPE into reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) /* Make sure the write completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) (void)read_csr(dd, DCC_CFG_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) /* Hold the reset long enough to take effect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) /* Take the LCB, RX FPE and TX FPE out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) hreq_response(dd, HREQ_SUCCESS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) case HREQ_CONFIG_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) hreq_response(dd, HREQ_SUCCESS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) case HREQ_INTERFACE_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) hreq_response(dd, HREQ_SUCCESS, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) }
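
/*
 * Handshake summary for the above, as implemented here: the 8051
 * raises a request by setting REQ_NEW in DC_DC8051_CFG_EXT_DEV_1 along
 * with the type and data fields; the host clears COMPLETED in
 * DC_DC8051_CFG_EXT_DEV_0, acts on the request, and answers through
 * hreq_response(), which sets COMPLETED together with a return code
 * and optional response data.
 */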
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) /*
 * Set up the allocation unit value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) void set_up_vau(struct hfi1_devdata *dd, u8 vau)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) /* do not modify other values in the register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) * Set up initial VL15 credits of the remote. Assumes the rest of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) * the CM credit registers are zero from a previous global or credit reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) * Shared limit for VL15 will always be 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) /* set initial values for total and shared credit limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) * Set total limit to be equal to VL15 credits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) * Leave shared limit at 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) * Zero all credit details from the previous connection and
 * reset the CM block's internal counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) void reset_link_credits(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) /* remove all previous VL credit limits */
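	/* per-VL credit CSRs are consecutive 64-bit registers: 8 * i stride */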
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) for (i = 0; i < TXE_NUM_DATA_VL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) write_csr(dd, SEND_CM_CREDIT_VL15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) /* reset the CM block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) pio_send_control(dd, PSC_CM_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) /* reset cached value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) dd->vl15buf_cached = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) /* convert a vCU to a CU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) static u32 vcu_to_cu(u8 vcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) return 1 << vcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) /* convert a CU to a vCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) static u8 cu_to_vcu(u32 cu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) return ilog2(cu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) /* convert a vAU to an AU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) static u32 vau_to_au(u8 vau)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) return 8 * (1 << vau);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) }
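
/*
 * Worked examples for the encodings above:
 *
 *	vcu_to_cu(0) = 1,  vcu_to_cu(3) = 8	CUs are powers of two
 *	cu_to_vcu(8) = 3			inverse, via ilog2()
 *	vau_to_au(0) = 8,  vau_to_au(2) = 32	AUs in bytes: 8 * 2^vau
 */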
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) static void set_linkup_defaults(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) ppd->sm_trap_qp = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) ppd->sa_qp = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) /* clear lcb run: LCB_CFG_RUN.EN = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) write_csr(dd, DC_LCB_CFG_RUN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) reg = read_csr(dd, DCC_CFG_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) write_csr(dd, DCC_CFG_RESET, reg |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) if (!abort) {
		udelay(1); /* must hold for the longer of 16 cclks or 20ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) write_csr(dd, DCC_CFG_RESET, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) * This routine should be called after the link has been transitioned to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) * reset).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) * The expectation is that the caller of this routine would have taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) * care of properly transitioning the link into the correct state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) * NOTE: the caller needs to acquire the dd->dc8051_lock lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) * before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) static void _dc_shutdown(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) lockdep_assert_held(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) if (dd->dc_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) dd->dc_shutdown = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) /* Shutdown the LCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) lcb_shutdown(dd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) /*
	 * Going to OFFLINE would have caused the 8051 to put the
	 * SerDes into reset already. Just need to shut down the 8051
	 * itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) write_csr(dd, DC_DC8051_CFG_RST, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) static void dc_shutdown(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) mutex_lock(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) _dc_shutdown(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) mutex_unlock(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) }
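
/*
 * dc_shutdown()/dc_start() are the locking wrappers; the leading-
 * underscore variants assume dd->dc8051_lock is already held and
 * assert as much via lockdep.
 */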
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) * Calling this after the DC has been brought out of reset should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) * do any damage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) * NOTE: the caller needs to acquire the dd->dc8051_lock lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) * before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) static void _dc_start(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) lockdep_assert_held(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) if (!dd->dc_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) /* Take the 8051 out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) write_csr(dd, DC_DC8051_CFG_RST, 0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) /* Wait until 8051 is ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) if (wait_fm_ready(dd, TIMEOUT_8051_START))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) /* lcb_shutdown() with abort=1 does not restore these */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) dd->dc_shutdown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) static void dc_start(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) mutex_lock(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) _dc_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) mutex_unlock(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) }
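
/*
 * Illustrative sketch, not called anywhere in the driver: it shows how
 * the locked _dc_shutdown()/_dc_start() pair composes into a full DC
 * bounce under a single dc8051_lock hold.  The helper name
 * dc_bounce_example is hypothetical.
 */
static inline void dc_bounce_example(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_shutdown(dd);	/* no-op if the DC is already shut down */
	_dc_start(dd);		/* restarts the 8051; dc_shutdown is now 1 */
	mutex_unlock(&dd->dc8051_lock);
}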
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) * These LCB adjustments are for the Aurora SerDes core in the FPGA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) u64 rx_radr, tx_radr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) u32 version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) if (dd->icode != ICODE_FPGA_EMULATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618) * These LCB defaults on the _s emulator are good; nothing to do here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) * LCB_CFG_TX_FIFOS_RADR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) * LCB_CFG_RX_FIFOS_RADR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) * LCB_CFG_LN_DCLK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) * LCB_CFG_IGNORE_LOST_RCLK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) if (is_emulator_s(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) /* else this is _p */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) version = emulator_rev(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) if (!is_ax(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) version = 0x2d; /* all B0 use 0x2d or higher settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) if (version <= 0x12) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) /* release 0x12 and below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) rx_radr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) } else if (version <= 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) /* release 0x13 up to 0x18 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) rx_radr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) } else if (version == 0x19) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) /* release 0x19 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) rx_radr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) } else if (version == 0x1a) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) /* release 0x1a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) rx_radr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) /* release 0x1b and higher */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) rx_radr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) }
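
/*
 * Sketch only: judging by the 0x988/0xa99 comments above, the three RX
 * RADR fields pack as nibbles (shifts of 8, 4 and 0).  Assuming that, a
 * hypothetical helper like this would build the same values as the
 * open-coded expressions in adjust_lcb_for_fpga_serdes(), e.g.
 * build_rx_radr(0x9, 0x8, 0x8) == 0x988.
 */
static inline u64 build_rx_radr(u64 do_not_jump, u64 ok_to_jump, u64 rst)
{
	return do_not_jump << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT |
	       ok_to_jump << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT |
	       rst << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
}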
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) * Handle a SMA idle message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) * This is a work-queue function outside of the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) void handle_sma_message(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) sma_message_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) u64 msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) * msg is bytes 1-4 of the 40-bit idle message - the command code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) * is stripped off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) ret = read_idle_sma(dd, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) * React to the SMA message. Byte[1] of the idle message (byte 0 of msg) is the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) switch (msg & 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) case SMA_IDLE_ARM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) * See OPAv1 table 9-14 - HFI and External Switch Ports Key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) * State Transitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) * Only expected in INIT or ARMED, discard otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) ppd->neighbor_normal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) case SMA_IDLE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) * See OPAv1 table 9-14 - HFI and External Switch Ports Key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) * State Transitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) * Can activate the node. Discard otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) if (ppd->host_link_state == HLS_UP_ARMED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) ppd->is_active_optimize_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) ppd->neighbor_normal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) ret = set_link_state(ppd, HLS_UP_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) dd_dev_err(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) "%s: received Active SMA idle message, couldn't set link to Active\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) "%s: received unexpected SMA idle message 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) __func__, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) u64 rcvctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) spin_lock_irqsave(&dd->rcvctrl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) rcvctrl = read_csr(dd, RCV_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) rcvctrl |= add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) rcvctrl &= ~clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) write_csr(dd, RCV_CTRL, rcvctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) adjust_rcvctrl(dd, add, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) adjust_rcvctrl(dd, 0, clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) }
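
/*
 * Usage sketch (hypothetical helper): toggle the receive port enable
 * bit through the wrappers above, mirroring the real calls in
 * rxe_freeze() and rxe_kernel_unfreeze() below.
 */
static inline void rcv_port_enable_example(struct hfi1_devdata *dd, bool on)
{
	if (on)
		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
	else
		clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}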
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) * Called from all interrupt handlers to start handling an SPC freeze.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) struct send_context *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) int sc_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) if (flags & FREEZE_SELF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) /* enter frozen mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) dd->flags |= HFI1_FROZEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) /* notify all SDMA engines that they are going into a freeze */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) SCF_LINK_DOWN : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) /* do halt pre-handling on all enabled send contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) for (i = 0; i < dd->num_send_contexts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) sc = dd->send_contexts[i].sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) if (sc && (sc->flags & SCF_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) sc_stop(sc, sc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) /* Send contexts are frozen. Notify user space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) if (flags & FREEZE_ABORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) "Aborted freeze recovery. Please REBOOT system\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) /* queue non-interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) queue_work(ppd->hfi1_wq, &ppd->freeze_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) * depending on the "freeze" parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) * No need to return an error if it times out; our only option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) * is to proceed anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) reg = read_csr(dd, CCE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) if (freeze) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) /* waiting until all indicators are set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) if ((reg & ALL_FROZE) == ALL_FROZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) return; /* all done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) /* waiting until all indicators are clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) if ((reg & ALL_FROZE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) return; /* all done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) "Timeout waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) freeze ? "" : "un", reg & ALL_FROZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) freeze ? ALL_FROZE : 0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) usleep_range(80, 120);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) * Do all freeze handling for the RXE block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) static void rxe_freeze(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) /* disable port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) /* disable all receive contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) for (i = 0; i < dd->num_rcv_contexts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) rcd = hfi1_rcd_get_by_index(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) * Unfreeze handling for the RXE block - kernel contexts only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) * This will also enable the port. User contexts will do unfreeze
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) * handling on a per-context basis as they call into the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) u32 rcvmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) /* enable all kernel contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) for (i = 0; i < dd->num_rcv_contexts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) rcd = hfi1_rcd_get_by_index(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) /* Ensure all non-user contexts (including vnic) are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) if (!rcd ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) rcvmask = HFI1_RCVCTRL_CTXT_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) hfi1_rcvctrl(dd, rcvmask, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) /* enable port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) * Non-interrupt SPC freeze handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) * This is a work-queue function outside of the triggering interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) void handle_freeze(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) freeze_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) /* wait for freeze indicators on all affected blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) wait_for_freeze_status(dd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) /* SPC is now frozen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) /* do send PIO freeze steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) pio_freeze(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) /* do send DMA freeze steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) sdma_freeze(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) /* do send egress freeze steps - nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) /* do receive freeze steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) rxe_freeze(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) * Unfreeze the hardware - clear the freeze, wait for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) * block's frozen bit to clear, then clear the frozen flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) wait_for_freeze_status(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937)
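	/*
	 * A0 (is_ax) parts get a second freeze/unfreeze cycle below;
	 * presumably an early-silicon workaround, and harmless on the
	 * unfreeze path.
	 */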
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) if (is_ax(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) wait_for_freeze_status(dd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) wait_for_freeze_status(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) /* do send PIO unfreeze steps for kernel contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) pio_kernel_unfreeze(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) /* do send DMA unfreeze steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) sdma_unfreeze(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) /* do send egress unfreeze steps - nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) /* do receive unfreeze steps for kernel contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) rxe_kernel_unfreeze(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) * The unfreeze procedure touches global device registers when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) * it disables and re-enables RXE. Mark the device unfrozen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) * after all that is done so other parts of the driver waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) * for the device to unfreeze don't do things out of order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) * The above implies that the meaning of HFI1_FROZEN flag is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) * "Device has gone into freeze mode and freeze mode handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) * is still in progress."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) * The flag will be removed when freeze mode processing has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) * completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) dd->flags &= ~HFI1_FROZEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) wake_up(&dd->event_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) /* no longer frozen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) * update_xmit_counters - update PortXmitWait/PortVlXmitWait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) * counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) * @ppd: info of physical HFI port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) * @link_width: new link width after link up or downgrade
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) * Update the PortXmitWait and PortVlXmitWait counters after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) * a link up or downgrade event to reflect a link width change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) u16 tx_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) u16 link_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) tx_width = tx_link_width(link_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) link_speed = get_link_speed(ppd->link_speed_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) * There are C_VL_COUNT number of PortVLXmitWait counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) for (i = 0; i < C_VL_COUNT + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) get_xmit_wait_counters(ppd, tx_width, link_speed, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) }
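
/*
 * Worked example (C_VL_COUNT's actual value is defined elsewhere in
 * this file): if C_VL_COUNT were 8, i would run 0..8, i.e. eight
 * PortVlXmitWait updates plus one more covering the port-wide
 * PortXmitWait counter.
 */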
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) * Handle a link up interrupt from the 8051.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) * This is a work-queue function outside of the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) void handle_link_up(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) link_up_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) set_link_state(ppd, HLS_UP_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) read_ltp_rtt(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) * OPA specifies that certain counters are cleared on a transition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) * to link up, so do that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) clear_linkup_counters(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) * And (re)set link up default values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) set_linkup_defaults(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) * Set VL15 credits. Use the cached value from the verify cap interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) * In case of quick linkup or the simulator, the vl15 value will be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) * by handle_linkup_change; the VerifyCap interrupt handler is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) * called in those scenarios.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) set_up_vl15(dd, dd->vl15buf_cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) /* enforce link speed enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) /* oops - current speed is not enabled, bounce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) ppd->link_speed_active, ppd->link_speed_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) OPA_LINKDOWN_REASON_SPEED_POLICY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) set_link_state(ppd, HLS_DN_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) * Several pieces of LNI information were cached for SMA in ppd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) * Reset these on link down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) static void reset_neighbor_info(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) ppd->neighbor_guid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055) ppd->neighbor_port_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) ppd->neighbor_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) ppd->neighbor_fm_security = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) static const char * const link_down_reason_strs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) [OPA_LINKDOWN_REASON_NONE] = "None",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) "Excessive buffer overrun",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) "Local media not installed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) "End to end not installed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) /* return the neighbor link down reason string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) static const char *link_down_reason_str(u8 reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) const char *str = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) if (reason < ARRAY_SIZE(link_down_reason_strs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) str = link_down_reason_strs[reason];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) str = "(invalid)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) }
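
/*
 * Note: link_down_reason_strs[] uses designated initializers, so any
 * reason code not listed reads back as NULL even when it is in range;
 * the NULL check above folds such gaps and out-of-range values into
 * "(invalid)".
 */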
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) * Handle a link down interrupt from the 8051.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) * This is a work-queue function outside of the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) void handle_link_down(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) u8 lcl_reason, neigh_reason = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) u8 link_down_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) link_down_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) int was_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) static const char ldr_str[] = "Link down reason: ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) if ((ppd->host_link_state &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) ppd->port_type == PORT_TYPE_FIXED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) ppd->offline_disabled_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) /* Go offline first, then deal with reading/writing through 8051 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) was_up = !!(ppd->host_link_state & HLS_UP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) set_link_state(ppd, HLS_DN_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) xchg(&ppd->is_link_down_queued, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) if (was_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) lcl_reason = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) /* link down reason is only valid if the link was up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) read_link_down_reason(ppd->dd, &link_down_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) switch (link_down_reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) case LDR_LINK_TRANSFER_ACTIVE_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) /* the link went down, no idle message reason */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) dd_dev_info(ppd->dd, "%sUnexpected link down\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) ldr_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) * The neighbor reason is only valid if an idle message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) * was received for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) read_planned_down_reason_code(ppd->dd, &neigh_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) dd_dev_info(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) "%sNeighbor link down message %d, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) ldr_str, neigh_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) link_down_reason_str(neigh_reason));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) case LDR_RECEIVED_HOST_OFFLINE_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) dd_dev_info(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) "%sHost requested link to go offline\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) ldr_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) ldr_str, link_down_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) * If no reason, assume peer-initiated but missed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) * LinkGoingDown idle flits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) if (neigh_reason == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) /* went down while polling or going up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) /* inform the SMA when the link transitions from up to down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) if (was_up && ppd->local_link_down_reason.sma == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) ppd->neigh_link_down_reason.sma == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) ppd->local_link_down_reason.sma =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) ppd->local_link_down_reason.latest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) ppd->neigh_link_down_reason.sma =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) ppd->neigh_link_down_reason.latest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) reset_neighbor_info(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) /* disable the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) * If there is no cable attached, turn the DC off. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) * start the link bring up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) dc_shutdown(ppd->dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) void handle_link_bounce(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) link_bounce_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) * Only do something if the link is currently up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) if (ppd->host_link_state & HLS_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) set_link_state(ppd, HLS_DN_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) __func__, link_state_name(ppd->host_link_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) * Mask conversion: Capability exchange to Port LTP. The capability
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) * exchange has an implicit 16b CRC that is mandatory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) static int cap_to_port_ltp(int cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) if (cap & CAP_CRC_14B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) port_ltp |= PORT_LTP_CRC_MODE_14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) if (cap & CAP_CRC_48B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) port_ltp |= PORT_LTP_CRC_MODE_48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) if (cap & CAP_CRC_12B_16B_PER_LANE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) return port_ltp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) * Convert an OPA Port LTP mask to capability mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) int port_ltp_to_cap(int port_ltp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) int cap_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) if (port_ltp & PORT_LTP_CRC_MODE_14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) cap_mask |= CAP_CRC_14B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) if (port_ltp & PORT_LTP_CRC_MODE_48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) cap_mask |= CAP_CRC_48B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) cap_mask |= CAP_CRC_12B_16B_PER_LANE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) return cap_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) static int lcb_to_port_ltp(int lcb_crc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) int port_ltp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) else if (lcb_crc == LCB_CRC_48B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) port_ltp = PORT_LTP_CRC_MODE_48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) else if (lcb_crc == LCB_CRC_14B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) port_ltp = PORT_LTP_CRC_MODE_14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) port_ltp = PORT_LTP_CRC_MODE_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) return port_ltp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) }
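
/*
 * Round-trip sketch (hypothetical helper): cap_to_port_ltp() and
 * port_ltp_to_cap() are inverses for the three optional CRC modes;
 * the mandatory 16b mode is added only on the cap->LTP direction, so
 * it drops out again here.  E.g. cap = CAP_CRC_14B | CAP_CRC_48B
 * comes back unchanged.
 */
static inline int crc_cap_roundtrip_example(int cap)
{
	return port_ltp_to_cap(cap_to_port_ltp(cap));
}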
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) if (ppd->pkeys[2] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) ppd->pkeys[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) hfi1_event_pkey_change(ppd->dd, ppd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) * Convert the given link width to the OPA link width bitmask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) switch (width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) * Simulator and quick linkup do not set the width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) * Just set it to 4x without complaint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) return OPA_LINK_WIDTH_4X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) return 0; /* no lanes up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) case 1: return OPA_LINK_WIDTH_1X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) case 2: return OPA_LINK_WIDTH_2X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) case 3: return OPA_LINK_WIDTH_3X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) case 4: return OPA_LINK_WIDTH_4X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) dd_dev_info(dd, "%s: invalid width %d, using 4\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) __func__, width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) return OPA_LINK_WIDTH_4X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) * Do a population count on the bottom nibble.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) static const u8 bit_counts[16] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) static inline u8 nibble_to_count(u8 nibble)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) return bit_counts[nibble & 0xf];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) }
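
/*
 * Worked example: enable_lane_tx = 0xb (lanes 0, 1 and 3 active) gives
 * nibble_to_count(0xb) = 3, which link_width_to_bits() above maps to
 * OPA_LINK_WIDTH_3X.
 */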
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) * Read the active lane information from the 8051 registers and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) * their widths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) * Active lane information is found in these 8051 registers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) * enable_lane_tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) * enable_lane_rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) u16 *rx_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) u16 tx, rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) u8 enable_lane_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) u8 enable_lane_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) u8 tx_polarity_inversion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) u8 rx_polarity_inversion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) u8 max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) /* read the active lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) &rx_polarity_inversion, &max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) read_local_lni(dd, &enable_lane_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) /* convert to counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) tx = nibble_to_count(enable_lane_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) rx = nibble_to_count(enable_lane_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) * Set link_speed_active here, overriding what was set in
* handle_verify_cap(). Before v0.19, the ASIC 8051 firmware does not
* correctly set the max_rate field in time for handle_verify_cap().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) if ((dd->icode == ICODE_RTL_SILICON) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) /* max_rate: 0 = 12.5G, 1 = 25G */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) switch (max_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) "%s: unexpected max rate %d, using 25Gb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) __func__, (int)max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) enable_lane_tx, tx, enable_lane_rx, rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) *tx_width = link_width_to_bits(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) *rx_width = link_width_to_bits(dd, rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
* Valid after the end of VerifyCap and during LinkUp. Does not change
* after link up; that is, look elsewhere for downgrade information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) * Bits are:
* + bits [7:4] contain the number of active transmitters
* + bits [3:0] contain the number of active receivers
* These are numbers 1 through 4 and can be different values if the
* link is asymmetric. The byte is read below into the upper half of
* the 16-bit 'widths' value, hence the shifts by 12 and 8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) * verify_cap_local_fm_link_width[0] retains its original value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) u16 *rx_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) u16 widths, tx, rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) u8 misc_bits, local_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) u16 active_tx, active_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) tx = widths >> 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) rx = (widths >> 8) & 0xf;
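/*
* Worked example (illustrative value): if link_width[1] is 0x42, it
* arrives in the upper byte of 'widths' (0x4200), giving tx = 4 and
* rx = 2 for an asymmetric link.
*/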
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) *tx_width = link_width_to_bits(dd, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) *rx_width = link_width_to_bits(dd, rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) /* print the active widths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) get_link_widths(dd, &active_tx, &active_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) * Set ppd->link_width_active and ppd->link_width_downgrade_active using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) * hardware information when the link first comes up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) * The link width is not available until after VerifyCap.AllFramesReceived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) * (the trigger for handle_verify_cap), so this is outside that routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) * and should be called when the 8051 signals linkup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) void get_linkup_link_widths(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) u16 tx_width, rx_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) /* get end-of-LNI link widths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) get_linkup_widths(ppd->dd, &tx_width, &rx_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) /* use tx_width as the link is supposed to be symmetric on link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) ppd->link_width_active = tx_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) /* link width downgrade active (LWD.A) starts out matching LW.A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) ppd->link_width_downgrade_tx_active = ppd->link_width_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) ppd->link_width_downgrade_rx_active = ppd->link_width_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) /* per OPA spec, on link up LWD.E resets to LWD.S */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
/* cache the active egress rate (units [10^6 bits/sec]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) ppd->current_egress_rate = active_egress_rate(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) }
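
/*
* Editorial sketch: active_egress_rate() is defined elsewhere in this
* file. Assuming it follows the units noted above, the cached value is
* plausibly the per-lane rate in Mb/s scaled by the active width:
*
*	rate = (speed == OPA_LINK_SPEED_25G) ? 25000 : 12500;
*	if (width == OPA_LINK_WIDTH_4X)
*		rate *= 4;	(4 lanes at 25G -> 100000 Mb/s)
*/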
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) * Handle a verify capabilities interrupt from the 8051.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) * This is a work-queue function outside of the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) void handle_verify_cap(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) link_vc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) u8 power_management;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) u8 continuous;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) u8 vcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) u8 vau;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) u8 z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) u16 vl15buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) u16 link_widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) u16 crc_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) u16 crc_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) u16 device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) u16 active_tx, active_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) u8 partner_supported_crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) u8 remote_tx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) u8 device_rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) set_link_state(ppd, HLS_VERIFY_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) lcb_shutdown(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) adjust_lcb_for_fpga_serdes(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) read_vc_remote_phy(dd, &power_management, &continuous);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) &partner_supported_crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) read_remote_device_id(dd, &device_id, &device_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) /* print the active widths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) get_link_widths(dd, &active_tx, &active_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) (int)power_management, (int)continuous);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) (int)vau, (int)z, (int)vcu, (int)vl15buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) (int)partner_supported_crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) (u32)remote_tx_rate, (u32)link_widths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) (u32)device_id, (u32)device_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) * The peer vAU value just read is the peer receiver value. HFI does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) * not support a transmit vAU of 0 (AU == 8). We advertised that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) * with Z=1 in the fabric capabilities sent to the peer. The peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) * will see our Z=1, and, if it advertised a vAU of 0, will move its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) * receive to vAU of 1 (AU == 16). Do the same here. We do not care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) * about the peer Z value - our sent vAU is 3 (hardwired) and is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) * subject to the Z value exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) if (vau == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) vau = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) set_up_vau(dd, vau);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) * Set VL15 credits to 0 in global credit register. Cache remote VL15
* credits value and wait for link-up interrupt to set it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) set_up_vl15(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) dd->vl15buf_cached = vl15buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) /* set up the LCB CRC mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) /* order is important: use the lowest bit in common */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) if (crc_mask & CAP_CRC_14B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) crc_val = LCB_CRC_14B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) else if (crc_mask & CAP_CRC_48B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) crc_val = LCB_CRC_48B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) crc_val = LCB_CRC_12B_16B_PER_LANE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) crc_val = LCB_CRC_16B;
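
/*
* Worked example (illustrative values): if we enabled only 48B while
* the peer advertised only 14B, crc_mask is 0, no case matches, and
* the chain above falls through to LCB_CRC_16B, the mandatory default.
*/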
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) write_csr(dd, DC_LCB_CFG_CRC_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) /* set (14b only) or clear sideband credit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) reg = read_csr(dd, SEND_CM_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) write_csr(dd, SEND_CM_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) write_csr(dd, SEND_CM_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) ppd->link_speed_active = 0; /* invalid value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553) if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) switch (remote_tx_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) ppd->link_speed_active = OPA_LINK_SPEED_25G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) /* actual rate is highest bit of the ANDed rates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) u8 rate = remote_tx_rate & ppd->local_tx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) if (rate & 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) ppd->link_speed_active = OPA_LINK_SPEED_25G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) else if (rate & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) if (ppd->link_speed_active == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) __func__, (int)remote_tx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) ppd->link_speed_active = OPA_LINK_SPEED_25G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) }
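
/*
* Worked example (illustrative values): with v0.20+ firmware, if we
* advertised local_tx_rate = 0x3 (12.5G and 25G) and the peer sent
* remote_tx_rate = 0x2 (25G only), then rate = 0x2 and the link runs
* at OPA_LINK_SPEED_25G; the higher common bit wins.
*/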
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577)
/*
* Cache the values of the supported, enabled, and active
* LTP CRC modes to return in 'portinfo' queries. The bit
* flags returned in the portinfo query differ from the
* encodings in link_crc_mask, port_crc_mode_enabled, and
* crc_val, so convert them here.
*/
/* supported crc modes */
ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
/* enabled crc modes */
ppd->port_ltp_crc_mode |=
cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
/* active crc mode */
ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593) /* set up the remote credit return table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) assign_remote_cm_au_table(dd, vcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) * The LCB is reset on entry to handle_verify_cap(), so this must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) * be applied on every link up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) * Adjust LCB error kill enable to kill the link if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601) * these RBUF errors are seen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) * REPLAY_BUF_MBE_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) * FLIT_INPUT_BUF_MBE_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) if (is_ax(dd)) { /* fixed in B0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612) /* pull LCB fifos out of reset - all fifo clocks must be stable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) /* give 8051 access to the LCB CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) set_8051_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) /* tell the 8051 to go to LinkUp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) set_link_state(ppd, HLS_GOING_UP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621) }
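
/*
* Editorial sketch: handle_verify_cap() runs as deferred work. The
* wiring is presumably of this form, with INIT_WORK() done once at
* port init (the queue_work() call appears in handle_8051_interrupt()
* below):
*
*	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
*	...
*	queue_work(ppd->link_wq, &ppd->link_vc_work);
*/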
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) * apply_link_downgrade_policy - Apply the link width downgrade enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) * policy against the current active link widths.
* @ppd: info of physical HFI port
* @refresh_widths: true indicates a link downgrade event occurred
* @return: true indicates a successful link downgrade. false indicates
*	    the link downgrade event failed and the link will bounce back
*	    to the default link width.
*
* Called when the enabled policy changes or the active link widths
* change.
* refresh_widths indicates that a link downgrade occurred; it seeds the
* link_downgraded result, which the policy checks below may clear to
* report failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) bool refresh_widths)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) int do_bounce = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) int tries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) u16 lwde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) u16 tx, rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645) bool link_downgraded = refresh_widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) /* use the hls lock to avoid a race with actual link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) tries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650) mutex_lock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) /* only apply if the link is up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) if (ppd->host_link_state & HLS_DOWN) {
/* still going up; wait and retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) if (ppd->host_link_state & HLS_GOING_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655) if (++tries < 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) mutex_unlock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657) usleep_range(100, 120); /* arbitrary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661) "%s: giving up waiting for link state change\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) lwde = ppd->link_width_downgrade_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) if (refresh_widths) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670) get_link_widths(ppd->dd, &tx, &rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671) ppd->link_width_downgrade_tx_active = tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) ppd->link_width_downgrade_rx_active = rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675) if (ppd->link_width_downgrade_tx_active == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) ppd->link_width_downgrade_rx_active == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) /* the 8051 reported a dead link as a downgrade */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679) link_downgraded = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) } else if (lwde == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) /* downgrade is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) /* bounce if not at starting active width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) if ((ppd->link_width_active !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) ppd->link_width_downgrade_tx_active) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) (ppd->link_width_active !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) ppd->link_width_downgrade_rx_active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) "Link downgrade is disabled and link has downgraded, downing link\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691) " original 0x%x, tx active 0x%x, rx active 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) ppd->link_width_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) ppd->link_width_downgrade_tx_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) ppd->link_width_downgrade_rx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) do_bounce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) link_downgraded = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698) } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) (lwde & ppd->link_width_downgrade_rx_active) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) /* Tx or Rx is outside the enabled policy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702) "Link is outside of downgrade allowed, downing link\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705) lwde, ppd->link_width_downgrade_tx_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706) ppd->link_width_downgrade_rx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707) do_bounce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708) link_downgraded = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712) mutex_unlock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714) if (do_bounce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715) set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716) OPA_LINKDOWN_REASON_WIDTH_POLICY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717) set_link_state(ppd, HLS_DN_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718) start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721) return link_downgraded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722) }
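
/*
* Worked example (illustrative values): with
* lwde = OPA_LINK_WIDTH_4X | OPA_LINK_WIDTH_3X and a downgrade to
* tx 3X / rx 2X, the rx width falls outside the enabled mask, so the
* routine above logs the violation, bounces the link, and returns
* false.
*/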
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7725) * Handle a link downgrade interrupt from the 8051.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727) * This is a work-queue function outside of the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) void handle_link_downgrade(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732) link_downgrade_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) if (apply_link_downgrade_policy(ppd, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736) update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) static char *dcc_err_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741) return flag_string(buf, buf_len, flags, dcc_err_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742) ARRAY_SIZE(dcc_err_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745) static char *lcb_err_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747) return flag_string(buf, buf_len, flags, lcb_err_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) ARRAY_SIZE(lcb_err_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751) static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) return flag_string(buf, buf_len, flags, dc8051_err_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754) ARRAY_SIZE(dc8051_err_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) ARRAY_SIZE(dc8051_info_err_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765) return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) ARRAY_SIZE(dc8051_info_host_msg_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772) u64 info, err, host_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) int queue_link_down = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776) /* look at the flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777) if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7778) /* 8051 information set by firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7779) /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7780) info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7781) err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783) host_msg = (info >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784) DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) * Handle error flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790) if (err & FAILED_LNI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) * LNI error indications are cleared by the 8051
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793) * only when starting polling. Only pay attention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) * to them when in the states that occur during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795) * LNI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797) if (ppd->host_link_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799) queue_link_down = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) dd_dev_info(dd, "Link error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) dc8051_info_err_string(buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802) sizeof(buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803) err &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) FAILED_LNI));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806) err &= ~(u64)FAILED_LNI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) }
/* unknown frames can happen during LNI, just count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) if (err & UNKNOWN_FRAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810) ppd->unknown_frame_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) err &= ~(u64)UNKNOWN_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) /* report remaining errors, but do not do anything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815) dd_dev_err(dd, "8051 info error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816) dc8051_info_err_string(buf, sizeof(buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818) }
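
/*
* Editorial note on the pattern above: each recognized bit is handled
* and then cleared from 'err', so anything that survives to the
* dd_dev_err() report is, by construction, unrecognized. E.g. (residual
* bit hypothetical) err = FAILED_LNI | UNKNOWN_FRAME | 0x80 logs the
* LNI failure (when in an LNI state), bumps unknown_frame_count, and
* reports only the leftover 0x80.
*/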
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821) * Handle host message flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) if (host_msg & HOST_REQ_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) * Presently, the driver does a busy wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826) * host requests to complete. This is only an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827) * informational message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) * NOTE: The 8051 clears the host message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) * information *on the next 8051 command*.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830) * Therefore, when linkup is achieved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) * this flag will still be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833) host_msg &= ~(u64)HOST_REQ_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835) if (host_msg & BC_SMA_MSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) queue_work(ppd->link_wq, &ppd->sma_message_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837) host_msg &= ~(u64)BC_SMA_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839) if (host_msg & LINKUP_ACHIEVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) dd_dev_info(dd, "8051: Link up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) queue_work(ppd->link_wq, &ppd->link_up_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) host_msg &= ~(u64)LINKUP_ACHIEVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) if (host_msg & EXT_DEVICE_CFG_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845) handle_8051_request(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846) host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) if (host_msg & VERIFY_CAP_FRAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849) queue_work(ppd->link_wq, &ppd->link_vc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) host_msg &= ~(u64)VERIFY_CAP_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852) if (host_msg & LINK_GOING_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) const char *extra = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854) /* no downgrade action needed if going down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855) if (host_msg & LINK_WIDTH_DOWNGRADED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7857) extra = " (ignoring downgrade)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7859) dd_dev_info(dd, "8051: Link down%s\n", extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860) queue_link_down = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861) host_msg &= ~(u64)LINK_GOING_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863) if (host_msg & LINK_WIDTH_DOWNGRADED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864) queue_work(ppd->link_wq, &ppd->link_downgrade_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865) host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) if (host_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868) /* report remaining messages, but do not do anything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869) dd_dev_info(dd, "8051 info host message: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870) dc8051_info_host_msg_string(buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871) sizeof(buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872) host_msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875) reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877) if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7879) * Lost the 8051 heartbeat. If this happens, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7880) * receive constant interrupts about it. Disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7881) * the interrupt after the first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) dd_dev_err(dd, "Lost 8051 heartbeat\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884) write_csr(dd, DC_DC8051_ERR_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885) read_csr(dd, DC_DC8051_ERR_EN) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886) ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888) reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890) if (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) /* report the error, but do not do anything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) dd_dev_err(dd, "8051 error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) dc8051_err_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896) if (queue_link_down) {
/*
* Do not queue a link down request if the link is already
* going down or disabled, or if one is already queued.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902) if ((ppd->host_link_state &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904) ppd->link_enabled == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905) dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906) __func__, ppd->host_link_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907) ppd->link_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909) if (xchg(&ppd->is_link_down_queued, 1) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911) "%s: link down request already queued\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914) queue_work(ppd->link_wq, &ppd->link_down_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917) }
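
/*
* Editorial sketch: the xchg() above implements a queue-once latch.
* The idiom in isolation, with hypothetical names:
*
*	if (xchg(&flag, 1) == 0)
*		queue_work(wq, &work);
*
* The first caller sees 0 and queues; later callers see 1 and skip.
* The link-down handler presumably resets the flag when it runs,
* re-arming the latch.
*/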
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919) static const char * const fm_config_txt[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920) [0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921) "BadHeadDist: Distance violation between two head flits",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922) [1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923) "BadTailDist: Distance violation between two tail flits",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7924) [2] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925) "BadCtrlDist: Distance violation between two credit control flits",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926) [3] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927) "BadCrdAck: Credits return for unsupported VL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928) [4] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929) "UnsupportedVLMarker: Received VL Marker",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930) [5] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931) "BadPreempt: Exceeded the preemption nesting level",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932) [6] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933) "BadControlFlit: Received unsupported control flit",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) /* no 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935) [8] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939) static const char * const port_rcv_txt[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940) [1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) "BadPktLen: Illegal PktLen",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7942) [2] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7943) "PktLenTooLong: Packet longer than PktLen",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7944) [3] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) "PktLenTooShort: Packet shorter than PktLen",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946) [4] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947) "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7948) [5] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7949) "BadDLID: Illegal DLID (0, doesn't match HFI)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7950) [6] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7951) "BadL2: Illegal L2 opcode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7952) [7] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7953) "BadSC: Unsupported SC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7954) [9] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7955) "BadRC: Illegal RC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7956) [11] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7957) "PreemptError: Preempting with same VL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7958) [12] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7959) "PreemptVL15: Preempting a VL15 packet",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7960) };
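
/*
* Editorial note: the gaps in these designated-initializer tables
* ([7] in fm_config_txt; [0], [8], and [10] here) are implicitly NULL;
* the switch statements in handle_dcc_err() below index only the
* populated entries.
*/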
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7962) #define OPA_LDR_FMCONFIG_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7963) #define OPA_LDR_PORTRCV_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7964) static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7966) u64 info, hdr0, hdr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7967) const char *extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7968) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7969) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7970) u8 lcl_reason = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7971) int do_bounce = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7973) if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7974) if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7975) info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7976) dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7977) /* set status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7978) dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7980) reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7982)
if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7985) /* this counter saturates at (2^32) - 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7986) if (ppd->link_downed < (u32)UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7987) ppd->link_downed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7988) reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7991) if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7992) u8 reason_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7994) info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7995) if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7996) dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7997) /* set status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7998) dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8000) switch (info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8001) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8002) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8003) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8004) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8005) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8006) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8007) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8008) extra = fm_config_txt[info];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8010) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8011) extra = fm_config_txt[info];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8012) if (ppd->port_error_action &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8013) OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8014) do_bounce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8015) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8016) * lcl_reason cannot be derived from info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8017) * for this error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8019) lcl_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8020) OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8023) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8024) reason_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8025) snprintf(buf, sizeof(buf), "reserved%lld", info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8026) extra = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8027) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8030) if (reason_valid && !do_bounce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8031) do_bounce = ppd->port_error_action &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8032) (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8033) lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8034) }
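
/*
* Worked example (illustrative value): a BadCrdAck (info == 3) tests
* bit OPA_LDR_FMCONFIG_OFFSET + 3 = 19 of port_error_action; if the
* fabric manager enabled that bit, the link bounces with lcl_reason
* OPA_LINKDOWN_REASON_BAD_HEAD_DIST + 3.
*/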
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8036) /* just report this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8037) dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8038) extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8039) reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8042) if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8043) u8 reason_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8045) info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8046) hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8047) hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8048) if (!(dd->err_info_rcvport.status_and_code &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8049) OPA_EI_STATUS_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8050) dd->err_info_rcvport.status_and_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8051) info & OPA_EI_CODE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8052) /* set status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8053) dd->err_info_rcvport.status_and_code |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8054) OPA_EI_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8056) * save first 2 flits in the packet that caused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8057) * the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8059) dd->err_info_rcvport.packet_flit1 = hdr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8060) dd->err_info_rcvport.packet_flit2 = hdr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8062) switch (info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8063) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8064) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8065) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8066) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8067) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8068) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8069) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8070) case 9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8071) case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8072) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8073) extra = port_rcv_txt[info];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8075) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8076) reason_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8077) snprintf(buf, sizeof(buf), "reserved%lld", info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8078) extra = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8082) if (reason_valid && !do_bounce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8083) do_bounce = ppd->port_error_action &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8084) (1 << (OPA_LDR_PORTRCV_OFFSET + info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8085) lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8088) /* just report this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8089) dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8090) " hdr0 0x%llx, hdr1 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8091) extra, hdr0, hdr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8093) reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8096) if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8097) /* informative only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8098) dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8099) reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8101) if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8102) /* informative only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8103) dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8104) reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8107) if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8108) reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8110) /* report any remaining errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8111) if (reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8112) dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8113) dcc_err_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8115) if (lcl_reason == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8116) lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8118) if (do_bounce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8119) dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8120) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8121) set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8122) queue_work(ppd->link_wq, &ppd->link_bounce_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8126) static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8128) char buf[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8130) dd_dev_info(dd, "LCB Error: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8131) lcb_err_string(buf, sizeof(buf), reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8135) * CCE block DC interrupt. Source is < 8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8137) static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8139) const struct err_reg_info *eri = &dc_errs[source];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8141) if (eri->handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8142) interrupt_clear_down(dd, 0, eri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8143) } else if (source == 3 /* dc_lbm_int */) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8145) * This indicates that a parity error has occurred on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8146) * address/control lines presented to the LBM. The error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8147) * is a single pulse, there is no associated error flag,
* and it is non-maskable. This is because if a parity
* error occurs on the request, the request is dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8150) * This should never occur, but it is nice to know if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8151) * ever does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8153) dd_dev_err(dd, "Parity error in DC LBM block\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8154) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8155) dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8160) * TX block send credit interrupt. Source is < 160.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8162) static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8164) sc_group_release_update(dd, source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8168) * TX block SDMA interrupt. Source is < 48.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8169) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8170) * SDMA interrupts are grouped by type:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8172) * 0 - N-1 = SDma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8173) * N - 2N-1 = SDmaProgress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8174) * 2N - 3N-1 = SDmaIdle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8176) static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8178) /* what interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8179) unsigned int what = source / TXE_NUM_SDMA_ENGINES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8180) /* which engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8181) unsigned int which = source % TXE_NUM_SDMA_ENGINES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8183) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8184) dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8185) slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8186) sdma_dumpstate(&dd->per_sdma[which]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8187) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8189) if (likely(what < 3 && which < dd->num_sdma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8190) sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8191) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8192) /* should not happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8193) dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8195) }
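
/*
 * Worked example for the decode above (a sketch assuming the chip's
 * 16 SDMA engines, i.e. TXE_NUM_SDMA_ENGINES == 16):
 *
 *	source = 21
 *	what   = 21 / 16 = 1	(SDmaProgress)
 *	which  = 21 % 16 = 5
 *
 * so per_sdma[5] receives the progress interrupt, with bit 21 set in
 * the mask passed to sdma_engine_interrupt().
 */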
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8197) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8198) * is_rcv_avail_int() - User receive context available IRQ handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8199) * @dd: valid dd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8200) * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8202) * RX block receive available interrupt. Source is < 160.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8203) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8204) * This is the general interrupt handler for user (PSM) receive contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8205) * and can only be used for non-threaded IRQs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8207) static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8209) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8210) char *err_detail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8212) if (likely(source < dd->num_rcv_contexts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8213) rcd = hfi1_rcd_get_by_index(dd, source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8214) if (rcd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8215) handle_user_interrupt(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8216) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8217) return; /* OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8219) /* received an interrupt, but no rcd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8220) err_detail = "dataless";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8221) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8222) /* received an interrupt, but are not using that context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8223) err_detail = "out of range";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8225) dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8226) err_detail, source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8229) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8230) * is_rcv_urgent_int() - User receive context urgent IRQ handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8231) * @dd: valid dd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8232) * @source: logical IRQ source (offset from IS_RCVURGENT_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8234) * RX block receive urgent interrupt. Source is < 160.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8235) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8236) * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8237) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8238) static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8240) struct hfi1_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8241) char *err_detail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8243) if (likely(source < dd->num_rcv_contexts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8244) rcd = hfi1_rcd_get_by_index(dd, source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8245) if (rcd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8246) handle_user_interrupt(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8247) hfi1_rcd_put(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8248) return; /* OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8250) /* received an interrupt, but no rcd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8251) err_detail = "dataless";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8252) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8253) /* received an interrupt, but are not using that context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8254) err_detail = "out of range";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8256) dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8257) err_detail, source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8261) * Reserved range interrupt. Should not be called in normal operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8263) static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8265) char name[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8267) dd_dev_err(dd, "unexpected %s interrupt\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8268) is_reserved_name(name, sizeof(name), source));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8271) static const struct is_table is_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8273) * start end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8274) * name func interrupt func
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8276) { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8277) is_misc_err_name, is_misc_err_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8278) { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8279) is_sdma_eng_err_name, is_sdma_eng_err_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8280) { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8281) is_sendctxt_err_name, is_sendctxt_err_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8282) { IS_SDMA_START, IS_SDMA_IDLE_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8283) is_sdma_eng_name, is_sdma_eng_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8284) { IS_VARIOUS_START, IS_VARIOUS_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8285) is_various_name, is_various_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8286) { IS_DC_START, IS_DC_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8287) is_dc_name, is_dc_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8288) { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8289) is_rcv_avail_name, is_rcv_avail_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8290) { IS_RCVURGENT_START, IS_RCVURGENT_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8291) is_rcv_urgent_name, is_rcv_urgent_int },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8292) { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8293) is_send_credit_name, is_send_credit_int},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8294) { IS_RESERVED_START, IS_RESERVED_END,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8295) is_reserved_name, is_reserved_int},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8296) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8298) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8299) * Interrupt source interrupt - called when the given source has an interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8300) * Source is a bit index into an array of 64-bit integers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8302) static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8304) const struct is_table *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8306) /* avoids a double compare by walking the table in-order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8307) for (entry = &is_table[0]; entry->is_name; entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8308) if (source <= entry->end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8309) trace_hfi1_interrupt(dd, entry, source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8310) entry->is_int(dd, source - entry->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8311) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8314) /* fell off the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8315) dd_dev_err(dd, "invalid interrupt source %u\n", source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8316) }
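
/*
 * Walk sketch for the table above (the IS_*_START/_END values come
 * from the chip header): a source in [IS_SDMA_START, IS_SDMA_IDLE_END]
 * fails the "source <= entry->end" test for every earlier row, matches
 * the SDMA row, and is passed to is_sdma_eng_int() rebased to
 * source - IS_SDMA_START.
 */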
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8318) /**
* general_interrupt() - General interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8320) * @irq: MSIx IRQ vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8321) * @data: hfi1 devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8322) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8323) * This is able to correctly handle all non-threaded interrupts. Receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8324) * context DATA IRQs are threaded and are not supported by this handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8325) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8327) irqreturn_t general_interrupt(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8329) struct hfi1_devdata *dd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8330) u64 regs[CCE_NUM_INT_CSRS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8331) u32 bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8332) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8333) irqreturn_t handled = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8335) this_cpu_inc(*dd->int_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8337) /* phase 1: scan and clear all handled interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8338) for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8339) if (dd->gi_mask[i] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8340) regs[i] = 0; /* used later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8343) regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8344) dd->gi_mask[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8345) /* only clear if anything is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8346) if (regs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8347) write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8350) /* phase 2: call the appropriate handler */
for_each_set_bit(bit, (unsigned long *)&regs[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8352) CCE_NUM_INT_CSRS * 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8353) is_interrupt(dd, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8354) handled = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8357) return handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8358) }
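
/*
 * A note on the phase 2 index math (it follows directly from the loop
 * above): for_each_set_bit() treats regs[] as one flat bitmap, so a
 * bit found at position (i * 64) + b corresponds to bit b of
 * CCE_INT_STATUS CSR i, and that flat position is exactly the "source"
 * value handed to is_interrupt().
 */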
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8360) irqreturn_t sdma_interrupt(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8362) struct sdma_engine *sde = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8363) struct hfi1_devdata *dd = sde->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8364) u64 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8366) #ifdef CONFIG_SDMA_VERBOSITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8367) dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8368) slashstrip(__FILE__), __LINE__, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8369) sdma_dumpstate(sde);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8370) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8372) this_cpu_inc(*dd->int_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8374) /* This read_csr is really bad in the hot path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8375) status = read_csr(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8376) CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8377) & sde->imask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8378) if (likely(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8379) /* clear the interrupt(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8380) write_csr(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8381) CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8382) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8384) /* handle the interrupt(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8385) sdma_engine_interrupt(sde, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8386) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8387) dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8388) sde->this_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8390) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8394) * Clear the receive interrupt. Use a read of the interrupt clear CSR
* to ensure that the write completed. This does NOT guarantee that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8396) * queued DMA writes to memory from the chip are pushed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8398) static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8400) struct hfi1_devdata *dd = rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8401) u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8403) write_csr(dd, addr, rcd->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8404) /* force the above write on the chip and get a value back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8405) (void)read_csr(dd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8408) /* force the receive interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8409) void force_recv_intr(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8411) write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8415) * Return non-zero if a packet is present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8416) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8417) * This routine is called when rechecking for packets after the RcvAvail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8418) * interrupt has been cleared down. First, do a quick check of memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8419) * a packet present. If not found, use an expensive CSR read of the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8420) * tail to determine the actual tail. The CSR read is necessary because there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8421) * is no method to push pending DMAs to memory other than an interrupt and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8422) * are trying to determine if we need to force an interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8424) static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8426) u32 tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8428) if (hfi1_packet_present(rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8429) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8430)
/* fall back to a CSR read, correct independent of DMA_RTAIL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8432) tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8433) return hfi1_rcd_head(rcd) != tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8436) /**
* receive_interrupt_common() - Common code for receive context interrupt handlers
* @rcd: valid receive context
*
* Update traces, increment the kernel IRQ counter and
* set up ASPM when needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8441) static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8443) struct hfi1_devdata *dd = rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8445) trace_hfi1_receive_interrupt(dd, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8446) this_cpu_inc(*dd->int_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8447) aspm_ctx_disable(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8450) /**
* __hfi1_rcd_eoi_intr() - Make HW issue a receive interrupt
* when there are packets present in the queue. When calling
* with interrupts enabled, use hfi1_rcd_eoi_intr() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8454) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8455) * @rcd: valid receive context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8457) static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8459) if (!rcd->rcvhdrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8460) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8461) clear_recv_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8462) if (check_packet_present(rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8463) force_recv_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8466) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8467) * hfi1_rcd_eoi_intr() - End of Interrupt processing action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8468) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8469) * @rcd: Ptr to hfi1_ctxtdata of receive context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8470) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8471) * Hold IRQs so we can safely clear the interrupt and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8472) * recheck for a packet that may have arrived after the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8473) * check and the interrupt clear. If a packet arrived, force another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8474) * interrupt. This routine can be called at the end of receive packet
* processing in interrupt service routines, interrupt service threads,
* and softirqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8478) static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8480) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8482) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8483) __hfi1_rcd_eoi_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8484) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8487) /**
* hfi1_netdev_rx_napi() - NAPI poll function that issues the EOI inline
* @napi: pointer to the napi object
* @budget: netdev budget
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8492) int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8494) struct hfi1_netdev_rxq *rxq = container_of(napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8495) struct hfi1_netdev_rxq, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8496) struct hfi1_ctxtdata *rcd = rxq->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8497) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8499) work_done = rcd->do_interrupt(rcd, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8501) if (work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8502) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8503) hfi1_rcd_eoi_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8506) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8507) }
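
/*
 * Note on the NAPI completion pattern above (generic NAPI behavior,
 * not hfi1-specific): only when do_interrupt() consumed less than the
 * budget is the poll finished, so napi_complete_done() plus the EOI
 * re-arm the receive interrupt; otherwise the NAPI core polls again
 * and no EOI is issued.
 */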
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8508)
/* Receive packet NAPI handler for the VNIC and AIP netdevs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8510) irqreturn_t receive_context_interrupt_napi(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8512) struct hfi1_ctxtdata *rcd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8514) receive_interrupt_common(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8516) if (likely(rcd->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8517) if (likely(napi_schedule_prep(rcd->napi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8518) __napi_schedule_irqoff(rcd->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8519) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8520) __hfi1_rcd_eoi_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8521) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8522) WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8523) rcd->ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8524) __hfi1_rcd_eoi_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8527) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8530) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8531) * Receive packet IRQ handler. This routine expects to be on its own IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8532) * This routine will try to handle packets immediately (latency), but if
it finds too many, it will invoke the thread handler (bandwidth). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8534) * chip receive interrupt is *not* cleared down until this or the thread (if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8535) * invoked) is finished. The intent is to avoid extra interrupts while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8536) * are processing packets anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8538) irqreturn_t receive_context_interrupt(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8540) struct hfi1_ctxtdata *rcd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8541) int disposition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8543) receive_interrupt_common(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8545) /* receive interrupt remains blocked while processing packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8546) disposition = rcd->do_interrupt(rcd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8549) * Too many packets were seen while processing packets in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8550) * IRQ handler. Invoke the handler thread. The receive interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8551) * remains blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8553) if (disposition == RCV_PKT_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8554) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8556) __hfi1_rcd_eoi_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8557) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8560) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8561) * Receive packet thread handler. This expects to be invoked with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8562) * receive interrupt still blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8564) irqreturn_t receive_context_thread(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8566) struct hfi1_ctxtdata *rcd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8568) /* receive interrupt is still blocked from the IRQ handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8569) (void)rcd->do_interrupt(rcd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8571) hfi1_rcd_eoi_intr(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8573) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8576) /* ========================================================================= */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8578) u32 read_physical_state(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8580) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8582) reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8583) return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8584) & DC_DC8051_STS_CUR_STATE_PORT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8587) u32 read_logical_state(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8589) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8591) reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8592) return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8593) & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8596) static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8598) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8600) reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8601) /* clear current state, set new state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8602) reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8603) reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8604) write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8607) /*
* Use the 8051 to read an LCB CSR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8610) static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8612) u32 regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8613) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8615) if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8616) if (acquire_lcb_access(dd, 0) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8617) *data = read_csr(dd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8618) release_lcb_access(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8619) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8621) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8624) /* register is an index of LCB registers: (offset - base) / 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8625) regno = (addr - DC_LCB_CFG_RUN) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8626) ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8627) if (ret != HCMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8628) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8629) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8630) }
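
/*
 * Worked example for the index math above (illustrative offset): LCB
 * CSRs are 8 bytes apart, so a CSR located 0x28 bytes past
 * DC_LCB_CFG_RUN has regno = 0x28 >> 3 = 5, and it is this index, not
 * the byte address, that HCMD_READ_LCB_CSR expects.
 */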
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8633) * Provide a cache for some of the LCB registers in case the LCB is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8634) * unavailable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8635) * (The LCB is unavailable in certain link states, for example.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8637) struct lcb_datum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8638) u32 off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8639) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8640) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8642) static struct lcb_datum lcb_cache[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8643) { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8644) { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8645) { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8646) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8648) static void update_lcb_cache(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8650) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8651) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8652) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8654) for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8655) ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8657) /* Update if we get good data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8658) if (likely(ret != -EBUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8659) lcb_cache[i].val = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8663) static int read_lcb_cache(u32 off, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8665) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8667) for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8668) if (lcb_cache[i].off == off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8669) *val = lcb_cache[i].val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8670) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8674) pr_warn("%s bad offset 0x%x\n", __func__, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8675) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8679) * Read an LCB CSR. Access may not be in host control, so check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8680) * Return 0 on success, -EBUSY on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8682) int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8684) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8686) /* if up, go through the 8051 for the value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8687) if (ppd->host_link_state & HLS_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8688) return read_lcb_via_8051(dd, addr, data);
/* if going up or down, check the cache; a miss means no access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8690) if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8691) if (read_lcb_cache(addr, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8692) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8693) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8696) /* otherwise, host has access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8697) *data = read_csr(dd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8699) }
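
/*
 * Summary of the access paths chosen above (derived from the checks in
 * read_lcb_csr()):
 *
 *	host_link_state			access
 *	HLS_UP*				8051 mailbox (read_lcb_via_8051)
 *	HLS_GOING_UP/GOING_OFFLINE	lcb_cache; -EBUSY on a cache miss
 *	anything else			direct CSR read
 */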
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8701) /*
* Use the 8051 to write an LCB CSR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8703) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8704) static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8706) u32 regno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8707) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8709) if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8710) (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8711) if (acquire_lcb_access(dd, 0) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8712) write_csr(dd, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8713) release_lcb_access(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8714) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8716) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8719) /* register is an index of LCB registers: (offset - base) / 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8720) regno = (addr - DC_LCB_CFG_RUN) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8721) ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8722) if (ret != HCMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8723) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8724) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8728) * Write an LCB CSR. Access may not be in host control, so check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8729) * Return 0 on success, -EBUSY on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8731) int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8733) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8735) /* if up, go through the 8051 for the value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8736) if (ppd->host_link_state & HLS_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8737) return write_lcb_via_8051(dd, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8738) /* if going up or down, no access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8739) if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8740) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8741) /* otherwise, host has access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8742) write_csr(dd, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8746) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8747) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8748) * < 0 = Linux error, not able to get access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8749) * > 0 = 8051 command RETURN_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8751) static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8752) u64 *out_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8754) u64 reg, completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8755) int return_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8756) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8758) hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8760) mutex_lock(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8762) /* We can't send any commands to the 8051 if it's in reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8763) if (dd->dc_shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8764) return_code = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8765) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8768) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8769) * If an 8051 host command timed out previously, then the 8051 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8770) * stuck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8771) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8772) * On first timeout, attempt to reset and restart the entire DC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8773) * block (including 8051). (Is this too big of a hammer?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8774) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8775) * If the 8051 times out a second time, the reset did not bring it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8776) * back to healthy life. In that case, fail any subsequent commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8778) if (dd->dc8051_timed_out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8779) if (dd->dc8051_timed_out > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8780) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8781) "Previous 8051 host command timed out, skipping command %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8782) type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8783) return_code = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8784) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8786) _dc_shutdown(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8787) _dc_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8791) * If there is no timeout, then the 8051 command interface is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8792) * waiting for a command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8795) /*
* When writing an LCB CSR, out_data contains the full value to
* be written, while in_data contains the relative LCB
* address in 7:0. Do the work of distributing the write data
* to where it needs to go here, rather than in the caller:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8800) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8801) * Write data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8802) * 39:00 -> in_data[47:8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8803) * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8804) * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8806) if (type == HCMD_WRITE_LCB_CSR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8807) in_data |= ((*out_data) & 0xffffffffffull) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8808) /* must preserve COMPLETED - it is tied to hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8809) reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8810) reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8811) reg |= ((((*out_data) >> 40) & 0xff) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8812) DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8813) | ((((*out_data) >> 48) & 0xffff) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8814) DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8815) write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8816) }
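
/*
 * Worked example of the split above (illustrative value): writing
 * 0x123456789abcdef0 to an LCB CSR places bits 39:0
 * (0x789abcdef0) into in_data[47:8], bits 47:40 (0x56) into
 * RETURN_CODE, and bits 63:48 (0x1234) into RSP_DATA, while the
 * hardware-owned COMPLETED bit read back above is preserved.
 */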
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8819) * Do two writes: the first to stabilize the type and req_data, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8820) * second to activate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8822) reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8823) << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8824) | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8825) << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8826) write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8827) reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8828) write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8829)
/* wait for completion by polling; an interrupt is the alternative */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8831) timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8832) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8833) reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8834) completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8835) if (completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8836) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8837) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8838) dd->dc8051_timed_out++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8839) dd_dev_err(dd, "8051 host command %u timeout\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8840) if (out_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8841) *out_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8842) return_code = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8843) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8845) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8848) if (out_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8849) *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8850) & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8851) if (type == HCMD_READ_LCB_CSR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8852) /* top 16 bits are in a different register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8853) *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8854) & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8855) << (48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8856) - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8859) return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8860) & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8861) dd->dc8051_timed_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8863) * Clear command for next user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8865) write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8867) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8868) mutex_unlock(&dd->dc8051_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8869) return return_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8872) static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8874) return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8877) int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8878) u8 lane_id, u32 config_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8880) u64 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8881) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8883) data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8884) | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8885) | (u64)config_data << LOAD_DATA_DATA_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8886) ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8887) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8888) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8889) "load 8051 config: field id %d, lane %d, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8890) (int)field_id, (int)lane_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8892) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8893) }
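
/*
 * write_vc_local_phy() and write_vc_local_fabric() below are typical
 * callers: each packs a 32-bit "frame" from individual fields and
 * loads it into a GENERAL_CONFIG field id via load_8051_config().
 */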
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8896) * Read the 8051 firmware "registers". Use the RAM directly. Always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8897) * set the result, even on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8898) * Return 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8900) int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8901) u32 *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8903) u64 big_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8904) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8905) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8907) /* address start depends on the lane_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8908) if (lane_id < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8909) addr = (4 * NUM_GENERAL_FIELDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8910) + (lane_id * 4 * NUM_LANE_FIELDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8911) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8912) addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8913) addr += field_id * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8915) /* read is in 8-byte chunks, hardware will truncate the address down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8916) ret = read_8051_data(dd, addr, 8, &big_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8918) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8919) /* extract the 4 bytes we want */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8920) if (addr & 0x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8921) *result = (u32)(big_data >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8922) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8923) *result = (u32)big_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8924) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8925) *result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8926) dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8927) __func__, lane_id, field_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8930) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8932)
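/*
 * Write the verify capability local PHY frame: the power management
 * and continuous remote update support bits.
 */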
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8933) static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8934) u8 continuous)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8936) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8938) frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8939) | power_management << POWER_MANAGEMENT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8940) return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8941) GENERAL_CONFIG, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8943)
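/*
 * Write the verify capability local fabric frame: vAU, Z, vCU, the
 * VL15 buffer size, and the supported CRC sizes.
 */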
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8944) static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8945) u16 vl15buf, u8 crc_sizes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8947) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8949) frame = (u32)vau << VAU_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8950) | (u32)z << Z_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8951) | (u32)vcu << VCU_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8952) | (u32)vl15buf << VL15BUF_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8953) | (u32)crc_sizes << CRC_SIZES_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8954) return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8955) GENERAL_CONFIG, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8957)
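/*
 * Read back the verify capability local link mode frame: misc config
 * bits, local flag bits, and the enabled link widths.
 */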
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8958) static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8959) u8 *flag_bits, u16 *link_widths)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8961) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8963) read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8964) &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8965) *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8966) *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8967) *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8969)
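/*
 * Write the verify capability local link mode frame: misc config bits,
 * local flag bits, and the enabled link widths.
 */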
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8970) static int write_vc_local_link_mode(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8971) u8 misc_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8972) u8 flag_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8973) u16 link_widths)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8975) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8977) frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8978) | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8979) | (u32)link_widths << LINK_WIDTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8980) return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8981) frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8983)
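/*
 * Write the local device ID and device revision frame, used to
 * identify ourselves to the link peer.
 */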
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8984) static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8985) u8 device_rev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8987) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8989) frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8990) | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8991) return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8994) static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8995) u8 *device_rev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8997) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8999) read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9000) *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9001) *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9002) & REMOTE_DEVICE_REV_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9004)
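/*
 * Set the host interface version field, which shares the
 * RESERVED_REGISTERS frame with other bits: read the frame, clear the
 * field, then write it back with the new version.
 */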
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9005) int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9007) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9008) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9010) mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9011) read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9012) /* Clear, then set field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9013) frame &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9014) frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9015) return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9016) frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9018)
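/*
 * Read the 8051 firmware version as major, minor, and patch numbers
 * from the MISC_STATUS and VERSION_PATCH frames.
 */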
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9019) void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9020) u8 *ver_patch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9022) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9024) read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9025) *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9026) STS_FM_VERSION_MAJOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9027) *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9028) STS_FM_VERSION_MINOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9030) read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9031) *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9032) STS_FM_VERSION_PATCH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9034)
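/*
 * Read the verify capability remote PHY frame: the power management
 * and continuous remote update support bits advertised by the peer.
 */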
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9035) static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9036) u8 *continuous)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9038) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9040) read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9041) *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9042) & POWER_MANAGEMENT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9043) *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9044) & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9046)
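/*
 * Read the verify capability remote fabric frame: vAU, Z, vCU, VL15
 * buffer size, and CRC sizes advertised by the peer.
 */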
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9047) static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9048) u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9050) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9052) read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9053) *vau = (frame >> VAU_SHIFT) & VAU_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9054) *z = (frame >> Z_SHIFT) & Z_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9055) *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9056) *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9057) *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9060) static void read_vc_remote_link_width(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9061) u8 *remote_tx_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9062) u16 *link_widths)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9064) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9066) read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9067) &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9068) *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9069) & REMOTE_TX_RATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9070) *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9073) static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9075) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9077) read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9078) *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9081) static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9083) read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9086) static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9088) read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9090)
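/*
 * Read the current link quality indicator. *link_quality is set to 0
 * if the link is not up or the read fails.
 */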
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9091) void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9093) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9094) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9096) *link_quality = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9097) if (dd->pport->host_link_state & HLS_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9098) ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9099) &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9100) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9101) *link_quality = (frame >> LINK_QUALITY_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9102) & LINK_QUALITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9106) static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9108) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9110) read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9111) *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9114) static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9116) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9118) read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9119) *ldr = (frame & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9121)
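/*
 * Read the current TX settings frame: lane TX enables, TX/RX polarity
 * inversion, and the maximum rate.
 */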
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9122) static int read_tx_settings(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9123) u8 *enable_lane_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9124) u8 *tx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9125) u8 *rx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9126) u8 *max_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9128) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9129) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9131) ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9132) *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9133) & ENABLE_LANE_TX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9134) *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9135) & TX_POLARITY_INVERSION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9136) *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9137) & RX_POLARITY_INVERSION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9138) *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9139) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9142) static int write_tx_settings(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9143) u8 enable_lane_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9144) u8 tx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9145) u8 rx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9146) u8 max_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9148) u32 frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9150) /* no need to mask, all variable sizes match field widths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9151) frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9152) | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9153) | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9154) | max_rate << MAX_RATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9155) return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9159) * Read an idle LCB message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9160) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9161) * Returns 0 on success, -EINVAL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9163) static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9165) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9167) ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9168) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9169) dd_dev_err(dd, "read idle message: type %d, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9170) (u32)type, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9171) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9173) dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9174) /* return only the payload as we already know the type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9175) *data_out >>= IDLE_PAYLOAD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9180) * Read an idle SMA message. To be done in response to a notification from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9181) * the 8051.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9182) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9183) * Returns 0 on success, -EINVAL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9185) static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9187) return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9188) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9191) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9192) * Send an idle LCB message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9193) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9194) * Returns 0 on success, -EINVAL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9196) static int send_idle_message(struct hfi1_devdata *dd, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9198) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9200) dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9201) ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9202) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9203) dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9204) data, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9205) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9211) * Send an idle SMA message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9212) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9213) * Returns 0 on success, -EINVAL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9215) int send_idle_sma(struct hfi1_devdata *dd, u64 message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9217) u64 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9219) data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9220) ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9221) return send_idle_message(dd, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9225) * Initialize the LCB, then do a quick link up. This may or may not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9226) * in loopback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9228) * return 0 on success, -errno on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9230) static int do_quick_linkup(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9232) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9234) lcb_shutdown(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9236) if (loopback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9237) /* LCB_CFG_LOOPBACK.VAL = 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9238) /* LCB_CFG_LANE_WIDTH.VAL = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9239) write_csr(dd, DC_LCB_CFG_LOOPBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9240) IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9241) write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9244) /* start the LCBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9245) /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9246) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9248) /* simulator-only loopback steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9249) if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9250) /* LCB_CFG_RUN.EN = 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9251) write_csr(dd, DC_LCB_CFG_RUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9252) 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9254) ret = wait_link_transfer_active(dd, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9255) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9256) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9258) write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9259) 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9262) if (!loopback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9264) * When doing quick linkup and not in loopback, both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9265) * sides must be done with LCB set-up before either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9266) * starts the quick linkup. Put a delay here so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9267) * both sides can be started and have a chance to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9268) * done with LCB set up before resuming.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9270) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9271) "Pausing for peer to be finished with LCB set up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9272) msleep(5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9273) dd_dev_err(dd, "Continuing with quick linkup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9276) write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9277) set_8051_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9280) * A "quick" LinkUp request sets the physical link state to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9281) * LinkUp without a verify capability sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9282) * This state is available in simulator v37 and later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9284) ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9285) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9286) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9287) "%s: set physical link state to quick LinkUp failed with return %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9288) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9290) set_host_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9291) write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9293) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9294) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9295) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9298) return 0; /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9302) * Do all special steps to set up loopback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9304) static int init_loopback(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9306) dd_dev_info(dd, "Entering loopback mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9308) /* all loopbacks should disable self GUID check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9309) write_csr(dd, DC_DC8051_CFG_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9310) (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9313) * The simulator has only one loopback option - LCB. Switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9314) * to that option, which includes quick link up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9315) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9316) * Accept all valid loopback values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9317) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9318) if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9319) (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9320) loopback == LOOPBACK_CABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9321) loopback = LOOPBACK_LCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9322) quick_linkup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9327) * SerDes loopback init sequence is handled in set_local_link_attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9329) if (loopback == LOOPBACK_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9332) /* LCB loopback - handled at poll time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9333) if (loopback == LOOPBACK_LCB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9334) quick_linkup = 1; /* LCB is always quick linkup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9336) /* not supported in emulation due to emulation RTL changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9337) if (dd->icode == ICODE_FPGA_EMULATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9338) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9339) "LCB loopback not supported in emulation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9340) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9342) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9345) /* external cable loopback requires no extra steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9346) if (loopback == LOOPBACK_CABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9349) dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9350) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9354) * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9355) * used in the Verify Capability link width attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9357) static u16 opa_to_vc_link_widths(u16 opa_widths)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9359) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9360) u16 result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9362) static const struct link_bits {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9363) u16 from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9364) u16 to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9365) } opa_link_xlate[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9366) { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9367) { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9368) { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9369) { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9370) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9372) for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9373) if (opa_widths & opa_link_xlate[i].from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9374) result |= opa_link_xlate[i].to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9376) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9380) * Set link attributes before moving to polling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9382) static int set_local_link_attributes(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9384) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9385) u8 enable_lane_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9386) u8 tx_polarity_inversion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9387) u8 rx_polarity_inversion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9388) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9389) u32 misc_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9390) /* reset our fabric serdes to clear any lingering problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9391) fabric_serdes_reset(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9393) /* set the local tx rate - need to read-modify-write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9394) ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9395) &rx_polarity_inversion, &ppd->local_tx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9396) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9397) goto set_local_link_attributes_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9399) if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9400) /* set the tx rate to the fastest enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9401) if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9402) ppd->local_tx_rate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9403) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9404) ppd->local_tx_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9405) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9406) /* set the tx rate to all enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9407) ppd->local_tx_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9408) if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9409) ppd->local_tx_rate |= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9410) if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9411) ppd->local_tx_rate |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9414) enable_lane_tx = 0xF; /* enable all four lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9415) ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9416) rx_polarity_inversion, ppd->local_tx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9417) if (ret != HCMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9418) goto set_local_link_attributes_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9420) ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9421) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9422) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9423) "Failed to set host interface version, return 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9424) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9425) goto set_local_link_attributes_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9429) * DC supports continuous updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9431) ret = write_vc_local_phy(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9432) 0 /* no power management */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9433) 1 /* continuous updates */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9434) if (ret != HCMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9435) goto set_local_link_attributes_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9437) /* z=1 in the next call: AU of 0 is not supported by the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9438) ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9439) ppd->port_crc_mode_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9440) if (ret != HCMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9441) goto set_local_link_attributes_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9443) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9444) * SerDes loopback init sequence requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9445) * setting bit 0 of MISC_CONFIG_BITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9446) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9447) if (loopback == LOOPBACK_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9448) misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9451) * An external device configuration request is used to reset the LCB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9452) * so that it can retry to obtain operational lanes when the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9453) * attempt is unsuccessful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9455) if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9456) misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9458) ret = write_vc_local_link_mode(dd, misc_bits, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9459) opa_to_vc_link_widths(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9460) ppd->link_width_enabled));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9461) if (ret != HCMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9462) goto set_local_link_attributes_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9464) /* let peer know who we are */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9465) ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9466) if (ret == HCMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9467) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9469) set_local_link_attributes_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9470) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9471) "Failed to set local link attributes, return 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9472) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9473) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9477) * Call this to start the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9478) * Do not do anything if the link is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9479) * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9481) int start_link(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9484) * Tune the SerDes to a ballpark setting for optimal signal and bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9485) * error rate. Needs to be done before starting the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9487) tune_serdes(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9489) if (!ppd->driver_link_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9490) dd_dev_info(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9491) "%s: stopping link start because driver is not ready\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9492) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9493) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9496) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9497) * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9498) * pkey table can be configured properly if the HFI unit is connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9499) * to a switch port with MgmtAllowed=NO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9501) clear_full_mgmt_pkey(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9503) return set_link_state(ppd, HLS_DN_POLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9505)
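/*
 * Wait up to 2 seconds for a freshly powered QSFP module to signal,
 * via the IntN line, that its initialization (t_init) is complete.
 */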
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9506) static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9508) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9509) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9510) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9512) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9513) * Some QSFP cables have a quirk that asserts the IntN line as a side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9514) * effect of power up on plug-in. Ignore this false-positive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9515) * interrupt until the module has finished powering up: wait at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9516) * least the module inrush initialization time of 500 ms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9517) * (SFF 8679 Table 5-6) to ensure the voltage rails in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9518) * module have stabilized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9520) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9523) * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9525) timeout = jiffies + msecs_to_jiffies(2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9526) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9527) mask = read_csr(dd, dd->hfi1_id ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9528) ASIC_QSFP2_IN : ASIC_QSFP1_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9529) if (!(mask & QSFP_HFI0_INT_N))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9530) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9531) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9532) dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9533) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9536) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9539)
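/*
 * Enable or disable the QSFP IntN interrupt for this port. When
 * enabling, clear any latched IntN status first so a stale event does
 * not fire immediately.
 */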
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9540) static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9542) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9543) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9545) mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9546) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9548) * Clear the status register to avoid an immediate interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9549) * when we re-enable the IntN pin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9551) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9552) QSFP_HFI0_INT_N);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9553) mask |= (u64)QSFP_HFI0_INT_N;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9554) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9555) mask &= ~(u64)QSFP_HFI0_INT_N;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9557) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9559)
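/*
 * Perform a complete QSFP module reset: pulse ResetN low, wait for the
 * module to initialize, re-enable the IntN interrupt, and turn the
 * transmitters off so the rest of QSFP setup can complete.
 */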
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9560) int reset_qsfp(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9562) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9563) u64 mask, qsfp_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9565) /* Disable INT_N from triggering QSFP interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9566) set_qsfp_int_n(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9568) /* Reset the QSFP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9569) mask = (u64)QSFP_HFI0_RESET_N;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9571) qsfp_mask = read_csr(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9572) dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9573) qsfp_mask &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9574) write_csr(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9575) dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9577) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9579) qsfp_mask |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9580) write_csr(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9581) dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9583) wait_for_qsfp_init(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9585) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9586) * Allow INT_N to trigger the QSFP interrupt to watch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9587) * for alarms and warnings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9589) set_qsfp_int_n(ppd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9591) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9592) * After the reset, AOC transmitters are enabled by default. They need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9593) * to be turned off to complete the QSFP setup before they can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9594) * enabled again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9596) return set_qsfp_tx(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9598)
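/*
 * Decode the QSFP alarm/warning interrupt status bytes and log each
 * condition found. Temperature is always checked; the remaining
 * alarms and warnings only matter while the link is up.
 */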
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9599) static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9600) u8 *qsfp_interrupt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9602) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9604) if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9605) (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9606) dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9607) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9609) if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9610) (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9611) dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9612) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9615) * The remaining alarms/warnings don't matter if the link is down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9617) if (ppd->host_link_state & HLS_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9618) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9620) if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9621) (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9622) dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9623) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9625) if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9626) (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9627) dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9628) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9630) /* Byte 2 is vendor specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9632) if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9633) (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9634) dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9635) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9637) if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9638) (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9639) dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9640) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9642) if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9643) (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9644) dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9645) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9647) if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9648) (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9649) dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9650) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9652) if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9653) (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9654) dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9655) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9657) if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9658) (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9659) dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9660) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9662) if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9663) (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9664) dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9665) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9667) if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9668) (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9669) dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9670) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9672) if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9673) (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9674) dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9675) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9677) if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9678) (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9679) dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9680) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9682) if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9683) (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9684) dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9685) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9687) if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9688) (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9689) dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9690) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9692) /* Bytes 9-10 and 11-12 are reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9693) /* Bytes 13-15 are vendor specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9695) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9698) /* This routine is only scheduled if the QSFP module present signal is asserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9699) void qsfp_event(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9701) struct qsfp_data *qd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9702) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9703) struct hfi1_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9705) qd = container_of(work, struct qsfp_data, qsfp_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9706) ppd = qd->ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9707) dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9709) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9710) if (!qsfp_mod_present(ppd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9711) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9713) if (ppd->host_link_state == HLS_DN_DISABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9714) dd_dev_info(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9715) "%s: stopping link start because link is disabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9716) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9717) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9721) * Turn DC back on after cable has been re-inserted. Up until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9722) * now, the DC has been in reset to save power.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9724) dc_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9726) if (qd->cache_refresh_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9727) set_qsfp_int_n(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9729) wait_for_qsfp_init(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9732) * Allow INT_N to trigger the QSFP interrupt to watch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9733) * for alarms and warnings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9735) set_qsfp_int_n(ppd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9737) start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9740) if (qd->check_interrupt_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9741) 		u8 qsfp_interrupt_status[16] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9743) if (one_qsfp_read(ppd, dd->hfi1_id, 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9744) &qsfp_interrupt_status[0], 16) != 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9745) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9746) "%s: Failed to read status of QSFP module\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9747) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9748) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9749) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9751) handle_qsfp_error_conditions(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9752) ppd, qsfp_interrupt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9753) spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9754) ppd->qsfp_info.check_interrupt_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9755) spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9756) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9761) void init_qsfp_int(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9763) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9764) u64 qsfp_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9766) qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9767) /* Clear current status to avoid spurious interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9768) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9769) qsfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9770) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9771) qsfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9773) set_qsfp_int_n(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9775) /* Handle active low nature of INT_N and MODPRST_N pins */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9776) if (qsfp_mod_present(ppd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9777) qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9778) write_csr(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9779) dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9780) qsfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9782) /* Enable the appropriate QSFP IRQ source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9783) if (!dd->hfi1_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9784) set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9785) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9786) set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9790) * Do a one-time initialize of the LCB block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9792) static void init_lcb(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9794) /* simulator does not correctly handle LCB cclk loopback, skip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9795) if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9796) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9798) /* the DC has been reset earlier in the driver load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9800) /* set LCB for cclk loopback on the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9801) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9802) write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9803) write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9804) write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9805) write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9806) write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9807) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9811) * Perform a test read on the QSFP. Return 0 on success, -ERRNO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9812) * on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9814) static int test_qsfp_read(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9816) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9817) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9820) 	 * Report success if this is not a QSFP port, or if it is a QSFP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9821) 	 * port but the cable is not present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9823) if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9824) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9826) /* read byte 2, the status byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9827) ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9828) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9829) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9830) if (ret != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9831) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9833) return 0; /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9837) * Values for QSFP retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9838) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9839)  * Give up after 10s (20 x 500ms). The overall timeout was determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9840)  * empirically from experience on a large cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9842) #define MAX_QSFP_RETRIES 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9843) #define QSFP_RETRY_WAIT 500 /* msec */
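/*
 * Worked example of the retry budget above: try_start_link() gives up
 * after MAX_QSFP_RETRIES * QSFP_RETRY_WAIT = 20 * 500 ms = 10 s of an
 * unresponsive QSFP.
 */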
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9846) * Try a QSFP read. If it fails, schedule a retry for later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9847) * Called on first link activation after driver load.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9849) static void try_start_link(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9851) if (test_qsfp_read(ppd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9852) /* read failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9853) if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9854) dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9855) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9857) dd_dev_info(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9858) "QSFP not responding, waiting and retrying %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9859) (int)ppd->qsfp_retry_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9860) ppd->qsfp_retry_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9861) queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9862) msecs_to_jiffies(QSFP_RETRY_WAIT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9863) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9865) ppd->qsfp_retry_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9867) start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9871) * Workqueue function to start the link after a delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9873) void handle_start_link(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9875) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9876) start_link_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9877) try_start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9880) int bringup_serdes(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9882) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9883) u64 guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9884) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9886) if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9887) add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9889) guid = ppd->guids[HFI1_PORT_GUID_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9890) if (!guid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9891) if (dd->base_guid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9892) guid = dd->base_guid + ppd->port - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9893) ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9896) /* Set linkinit_reason on power up per OPA spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9897) ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9899) /* one-time init of the LCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9900) init_lcb(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9902) if (loopback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9903) ret = init_loopback(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9904) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9905) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9908) get_port_type(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9909) if (ppd->port_type == PORT_TYPE_QSFP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9910) set_qsfp_int_n(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9911) wait_for_qsfp_init(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9912) set_qsfp_int_n(ppd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9915) try_start_link(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9919) void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9921) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9924) 	 * Shut down the link and keep it down. First clear the flag saying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9925) 	 * the driver wants to allow the link to be up (driver_link_ready).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9926) * Then make sure the link is not automatically restarted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9927) * (link_enabled). Cancel any pending restart. And finally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9928) * go offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9930) ppd->driver_link_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9931) ppd->link_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9933) ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9934) flush_delayed_work(&ppd->start_link_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9935) cancel_delayed_work_sync(&ppd->start_link_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9937) ppd->offline_disabled_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9938) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9939) set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9940) OPA_LINKDOWN_REASON_REBOOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9941) set_link_state(ppd, HLS_DN_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9943) /* disable the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9944) clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9945) cancel_work_sync(&ppd->freeze_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9948) static inline int init_cpu_counters(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9950) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9951) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9952)
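	/*
	 * The per-port structures are assumed to be laid out immediately
	 * after the devdata in the same allocation, hence dd + 1.
	 */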
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9953) ppd = (struct hfi1_pportdata *)(dd + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9954) for (i = 0; i < dd->num_pports; i++, ppd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9957) ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9958) ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9959) ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9960) if (!ppd->ibport_data.rvp.rc_acks ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9961) !ppd->ibport_data.rvp.rc_delayed_comp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9962) !ppd->ibport_data.rvp.rc_qacks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9963) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9966) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9970) * index is the index into the receive array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9972) void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9973) u32 type, unsigned long pa, u16 order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9975) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9977) if (!(dd->flags & HFI1_PRESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9978) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9980) if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9981) pa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9982) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9983) } else if (type > PT_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9984) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9985) "unexpected receive array type %u for index %u, not handled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9986) type, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9987) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9989) trace_hfi1_put_tid(dd, index, type, pa, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9991) #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
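	/*
	 * Example (hypothetical values): pa = 0x2345000 with order = 1
	 * programs BUF_SIZE = 1 and ADDR = 0x2345000 >> 12 = 0x2345, with
	 * write-enable set.
	 */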
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9992) reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9993) | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9994) | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9995) << RCV_ARRAY_RT_ADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9996) trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9997) writeq(reg, dd->rcvarray_wc + (index * 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9999) if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10001) * Eager entries are written and flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10002) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10003) * Expected entries are flushed every 4 writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10005) flush_wc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10006) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10007) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10010) void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10012) struct hfi1_devdata *dd = rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10013) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10015) /* this could be optimized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10016) 	for (i = rcd->eager_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10017) 	     i < rcd->eager_base + rcd->egrbufs.alloced; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10018) hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10020) for (i = rcd->expected_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10021) i < rcd->expected_base + rcd->expected_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10022) hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10025) static const char * const ib_cfg_name_strings[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10026) "HFI1_IB_CFG_LIDLMC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10027) "HFI1_IB_CFG_LWID_DG_ENB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10028) "HFI1_IB_CFG_LWID_ENB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10029) "HFI1_IB_CFG_LWID",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10030) "HFI1_IB_CFG_SPD_ENB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10031) "HFI1_IB_CFG_SPD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10032) "HFI1_IB_CFG_RXPOL_ENB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10033) "HFI1_IB_CFG_LREV_ENB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10034) "HFI1_IB_CFG_LINKLATENCY",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10035) "HFI1_IB_CFG_HRTBT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10036) "HFI1_IB_CFG_OP_VLS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10037) "HFI1_IB_CFG_VL_HIGH_CAP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10038) "HFI1_IB_CFG_VL_LOW_CAP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10039) "HFI1_IB_CFG_OVERRUN_THRESH",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10040) "HFI1_IB_CFG_PHYERR_THRESH",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10041) "HFI1_IB_CFG_LINKDEFAULT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10042) "HFI1_IB_CFG_PKEYS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10043) "HFI1_IB_CFG_MTU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10044) "HFI1_IB_CFG_LSTATE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10045) "HFI1_IB_CFG_VL_HIGH_LIMIT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10046) "HFI1_IB_CFG_PMA_TICKS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10047) "HFI1_IB_CFG_PORT"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10048) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10050) static const char *ib_cfg_name(int which)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10052) if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10053) return "invalid";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10054) return ib_cfg_name_strings[which];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10057) int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10059) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10060) int val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10062) switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10063) case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10064) val = ppd->link_width_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10066) case HFI1_IB_CFG_LWID: /* currently active Link-width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10067) val = ppd->link_width_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10069) case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10070) val = ppd->link_speed_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10072) case HFI1_IB_CFG_SPD: /* current Link speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10073) val = ppd->link_speed_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10076) case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10077) case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10078) case HFI1_IB_CFG_LINKLATENCY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10079) goto unimplemented;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10081) case HFI1_IB_CFG_OP_VLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10082) val = ppd->actual_vls_operational;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10084) case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10085) val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10086) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10087) case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10088) val = VL_ARB_LOW_PRIO_TABLE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10089) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10090) case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10091) val = ppd->overrun_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10092) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10093) case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10094) val = ppd->phy_error_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10096) case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10097) val = HLS_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10100) case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10101) case HFI1_IB_CFG_PMA_TICKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10102) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10103) unimplemented:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10104) 		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10105) 			dd_dev_info(dd, "%s: which %s: not implemented\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10106) 				    __func__, ib_cfg_name(which));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10110) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10113) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10117) * The largest MAD packet size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10119) #define MAX_MAD_PACKET 2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10122) * Return the maximum header bytes that can go on the _wire_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10123) * for this device. This count includes the ICRC which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10124) * not part of the packet held in memory but it is appended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10125) * by the HW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10126) * This is dependent on the device's receive header entry size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10127) * HFI allows this to be set per-receive context, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10128) * driver presently enforces a global value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10130) u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10133) * The maximum non-payload (MTU) bytes in LRH.PktLen are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10134) * the Receive Header Entry Size minus the PBC (or RHF) size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10135) * plus one DW for the ICRC appended by HW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10137) * dd->rcd[0].rcvhdrqentsize is in DW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10138) * We use rcd[0] as all context will have the same value. Also,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10139) * the first kernel context would have been allocated by now so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10140) * we are guaranteed a valid value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10141) */
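	/*
	 * Worked example (hypothetical entry size): a 32 DW receive header
	 * entry yields (32 - 2 + 1) << 2 = 124 bytes on the wire.
	 */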
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10142) return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10146) * Set Send Length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10147) * @ppd - per port data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10149) * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10150) * registers compare against LRH.PktLen, so use the max bytes included
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10151) * in the LRH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10153) * This routine changes all VL values except VL15, which it maintains at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10154) * the same value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10156) static void set_send_length(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10158) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10159) u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10160) u32 maxvlmtu = dd->vld[15].mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10161) u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10162) & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10163) SEND_LEN_CHECK1_LEN_VL15_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10164) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10165) u32 thres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10166)
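	/*
	 * Worked example (hypothetical values): an 8192-byte MTU on a VL
	 * with max_hb = 124 programs a limit of (8192 + 124) >> 2 = 2079
	 * DWs for that VL.
	 */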
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10167) for (i = 0; i < ppd->vls_supported; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10168) if (dd->vld[i].mtu > maxvlmtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10169) maxvlmtu = dd->vld[i].mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10170) if (i <= 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10171) len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10172) & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10173) ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10174) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10175) len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10176) & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10177) ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10179) write_csr(dd, SEND_LEN_CHECK0, len1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10180) write_csr(dd, SEND_LEN_CHECK1, len2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10181) /* adjust kernel credit return thresholds based on new MTUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10182) /* all kernel receive contexts have the same hdrqentsize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10183) for (i = 0; i < ppd->vls_supported; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10184) thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10185) sc_mtu_to_threshold(dd->vld[i].sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10186) dd->vld[i].mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10187) get_hdrqentsize(dd->rcd[0])));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10188) for (j = 0; j < INIT_SC_PER_VL; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10189) sc_set_cr_threshold(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10190) pio_select_send_context_vl(dd, j, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10191) thres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10193) thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10194) sc_mtu_to_threshold(dd->vld[15].sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10195) dd->vld[15].mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10196) dd->rcd[0]->rcvhdrqentsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10197) sc_set_cr_threshold(dd->vld[15].sc, thres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10199) /* Adjust maximum MTU for the port in DC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10200) dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10201) (ilog2(maxvlmtu >> 8) + 1);
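	/*
	 * Example (hypothetical MTU): maxvlmtu = 4096 encodes as
	 * ilog2(4096 >> 8) + 1 = ilog2(16) + 1 = 5.
	 */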
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10202) len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10203) len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10204) len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10205) DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10206) write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10209) static void set_lidlmc(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10211) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10212) u64 sreg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10213) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10214) u32 mask = ~((1U << ppd->lmc) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10215) u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10216) u32 lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10219) * Program 0 in CSR if port lid is extended. This prevents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10220) 	 * 9B packets from being sent out for large LIDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10222) lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
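	/*
	 * Example (hypothetical LMC): ppd->lmc = 2 gives mask = 0xfffffffc
	 * above, so the DLID/SLID checks ignore the low two LID bits.
	 */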
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10223) c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10224) | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10225) c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10226) << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10227) ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10228) << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10229) write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10232) * Iterate over all the send contexts and set their SLID check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10234) sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10235) SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10236) (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10237) SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10239) for (i = 0; i < chip_send_contexts(dd); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10240) hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10241) i, (u32)sreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10242) write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10245) /* Now we have to do the same thing for the sdma engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10246) sdma_update_lmc(dd, mask, lid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10249) static const char *state_completed_string(u32 completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10251) static const char * const state_completed[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10252) "EstablishComm",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10253) "OptimizeEQ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10254) "VerifyCap"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10255) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10257) if (completed < ARRAY_SIZE(state_completed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10258) return state_completed[completed];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10260) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10263) static const char all_lanes_dead_timeout_expired[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10264) "All lanes were inactive – was the interconnect media removed?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10265) static const char tx_out_of_policy[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10266) "Passing lanes on local port do not meet the local link width policy";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10267) static const char no_state_complete[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10268) "State timeout occurred before link partner completed the state";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10269) static const char * const state_complete_reasons[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10270) [0x00] = "Reason unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10271) [0x01] = "Link was halted by driver, refer to LinkDownReason",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10272) [0x02] = "Link partner reported failure",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10273) [0x10] = "Unable to achieve frame sync on any lane",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10274) [0x11] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10275) "Unable to find a common bit rate with the link partner",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10276) [0x12] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10277) "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10278) [0x13] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10279) "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10280) [0x14] = no_state_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10281) [0x15] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10282) "State timeout occurred before link partner identified equalization presets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10283) [0x16] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10284) "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10285) [0x17] = tx_out_of_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10286) [0x20] = all_lanes_dead_timeout_expired,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10287) [0x21] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10288) "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10289) [0x22] = no_state_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10290) [0x23] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10291) "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10292) [0x24] = tx_out_of_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10293) [0x30] = all_lanes_dead_timeout_expired,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10294) [0x31] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10295) "State timeout occurred waiting for host to process received frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10296) [0x32] = no_state_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10297) [0x33] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10298) "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10299) [0x34] = tx_out_of_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10300) [0x35] = "Negotiated link width is mutually exclusive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10301) [0x36] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10302) "Timed out before receiving verifycap frames in VerifyCap.Exchange",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10303) [0x37] = "Unable to resolve secure data exchange",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10304) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10306) static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10307) u32 code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10309) const char *str = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10311) if (code < ARRAY_SIZE(state_complete_reasons))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10312) str = state_complete_reasons[code];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10314) if (str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10315) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10316) return "Reserved";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10319) /* describe the given last state complete frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10320) static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10321) const char *prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10323) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10324) u32 success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10325) u32 state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10326) u32 reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10327) u32 lanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10330) * Decode frame:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10331) * [ 0: 0] - success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10332) * [ 3: 1] - state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10333) * [ 7: 4] - next state timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10334) * [15: 8] - reason code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10335) * [31:16] - lanes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10336) */
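	/*
	 * Example (hypothetical frame): 0x00030225 decodes as success = 1,
	 * state = 2 (VerifyCap), reason = 0x02 ("Link partner reported
	 * failure"), lanes = 0x0003.
	 */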
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10337) success = frame & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10338) state = (frame >> 1) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10339) reason = (frame >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10340) lanes = (frame >> 16) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10342) dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10343) prefix, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10344) 	dd_dev_err(dd, "  last reported state: %s (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10345) state_completed_string(state), state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10346) dd_dev_err(dd, " state successfully completed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10347) success ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10348) dd_dev_err(dd, " fail reason 0x%x: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10349) reason, state_complete_reason_code_string(ppd, reason));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10350) 	dd_dev_err(dd, "  passing lane mask: 0x%x\n", lanes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10354) * Read the last state complete frames and explain them. This routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10355) * expects to be called if the link went down during link negotiation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10356) * and initialization (LNI). That is, anywhere between polling and link up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10358) static void check_lni_states(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10360) u32 last_local_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10361) u32 last_remote_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10363) read_last_local_state(ppd->dd, &last_local_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10364) read_last_remote_state(ppd->dd, &last_remote_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10367) * Don't report anything if there is nothing to report. A value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10368) * 0 means the link was taken down while polling and there was no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10369) 	 * training in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10371) if (last_local_state == 0 && last_remote_state == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10372) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10374) decode_state_complete(ppd, last_local_state, "transmitted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10375) decode_state_complete(ppd, last_remote_state, "received");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10378) /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10379) static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10381) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10382) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10384) /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10385) timeout = jiffies + msecs_to_jiffies(wait_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10386) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10387) reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10388) if (reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10390) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10391) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10392) "timeout waiting for LINK_TRANSFER_ACTIVE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10393) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10395) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10397) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10400) /* called when the logical link state is not down as it should be */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10401) static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10403) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10406) * Bring link up in LCB loopback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10407) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10408) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10409) write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10410) DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10412) write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10413) write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10414) write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10415) write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10417) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10418) (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10419) udelay(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10420) write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10421) write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10423) wait_link_transfer_active(dd, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10425) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10426) * Bring the link down again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10428) write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10429) write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10430) write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10432) dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10436) * Helper for set_link_state(). Do not call except from that routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10437) * Expects ppd->hls_mutex to be held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10438) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10439) * @rem_reason value to be sent to the neighbor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10440) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10441) * LinkDownReasons only set if transition succeeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10443) static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10445) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10446) u32 previous_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10447) int offline_state_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10448) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10450) update_lcb_cache(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10452) previous_state = ppd->host_link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10453) ppd->host_link_state = HLS_GOING_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10455) /* start offline transition */
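	/* rem_reason rides in bits 15:8 of the physical-state request */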
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10456) ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10458) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10459) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10460) "Failed to transition to Offline link state, return %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10461) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10462) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10464) if (ppd->offline_disabled_reason ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10465) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10466) ppd->offline_disabled_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10467) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10469) offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10470) if (offline_state_ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10471) return offline_state_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10473) /* Disable AOC transmitters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10474) if (ppd->port_type == PORT_TYPE_QSFP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10475) ppd->qsfp_info.limiting_active &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10476) qsfp_mod_present(ppd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10477) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10479) ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10480) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10481) set_qsfp_tx(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10482) release_chip_resource(dd, qsfp_resource(dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10484) /* not fatal, but should warn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10485) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10486) "Unable to acquire lock to turn off QSFP TX\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10490) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10491) * Wait for the offline.Quiet transition if it hasn't happened yet. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10492) * can take a while for the link to go down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10494) if (offline_state_ret != PLS_OFFLINE_QUIET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10495) ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10496) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10497) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10501) * Now in charge of LCB - must be after the physical state is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10502) * offline.quiet and before host_link_state is changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10504) set_host_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10505) write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10507) /* make sure the logical state is also down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10508) ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10509) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10510) force_logical_link_state_down(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10512) ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10513) update_statusp(ppd, IB_PORT_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10516) * The LNI has a mandatory wait time after the physical state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10517) * moves to Offline.Quiet. The wait time may be different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10518) * depending on how the link went down. The 8051 firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10519) * will observe the needed wait time and only move to ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10520) * when that is completed. The largest of the quiet timeouts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10521) * is 6s, so wait that long and then at least 0.5s more for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10522) * other transitions, and another 0.5s for a buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10523) */
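	/* 6000 ms (quiet) + 500 ms (transitions) + 500 ms (buffer) = 7000 ms */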
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10524) ret = wait_fm_ready(dd, 7000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10525) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10526) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10527) "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10528) /* state is really offline, so make it so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10529) ppd->host_link_state = HLS_DN_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10534) * The state is now offline and the 8051 is ready to accept host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10535) * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10536) * - change our state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10537) * - notify others if we were previously in a linkup state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10539) ppd->host_link_state = HLS_DN_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10540) if (previous_state & HLS_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10541) /* went down while link was up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10542) handle_linkup_change(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10543) } else if (previous_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10544) & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10545) /* went down while attempting link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10546) check_lni_states(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10548) /* The QSFP doesn't need to be reset on LNI failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10549) ppd->qsfp_info.reset_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10552) /* the active link width (downgrade) is 0 on link down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10553) ppd->link_width_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10554) ppd->link_width_downgrade_tx_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10555) ppd->link_width_downgrade_rx_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10556) ppd->current_egress_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10560) /* return the link state name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10561) static const char *link_state_name(u32 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10563) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10564) int n = ilog2(state);
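	/*
	 * HLS_* values are one-hot masks built from the __HLS_*_BP bit
	 * positions, so ilog2() recovers the index into names[] below.
	 */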
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10565) static const char * const names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10566) [__HLS_UP_INIT_BP] = "INIT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10567) [__HLS_UP_ARMED_BP] = "ARMED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10568) [__HLS_UP_ACTIVE_BP] = "ACTIVE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10569) [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10570) [__HLS_DN_POLL_BP] = "POLL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10571) [__HLS_DN_DISABLE_BP] = "DISABLE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10572) [__HLS_DN_OFFLINE_BP] = "OFFLINE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10573) [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10574) [__HLS_GOING_UP_BP] = "GOING_UP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10575) [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10576) [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10577) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10579) name = n < ARRAY_SIZE(names) ? names[n] : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10580) return name ? name : "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10583) /* return the link state reason name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10584) static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10586) if (state == HLS_UP_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10587) switch (ppd->linkinit_reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10588) case OPA_LINKINIT_REASON_LINKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10589) return "(LINKUP)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10590) case OPA_LINKINIT_REASON_FLAPPING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10591) return "(FLAPPING)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10592) case OPA_LINKINIT_OUTSIDE_POLICY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10593) return "(OUTSIDE_POLICY)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10594) case OPA_LINKINIT_QUARANTINED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10595) return "(QUARANTINED)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10596) case OPA_LINKINIT_INSUFIC_CAPABILITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10597) return "(INSUFIC_CAPABILITY)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10598) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10599) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10602) return "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10605) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10606) * driver_pstate - convert the driver's notion of a port's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10607) * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10608) * Return -1 (converted to a u32) to indicate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10610) u32 driver_pstate(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10612) switch (ppd->host_link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10613) case HLS_UP_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10614) case HLS_UP_ARMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10615) case HLS_UP_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10616) return IB_PORTPHYSSTATE_LINKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10617) case HLS_DN_POLL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10618) return IB_PORTPHYSSTATE_POLLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10619) case HLS_DN_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10620) return IB_PORTPHYSSTATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10621) case HLS_DN_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10622) return OPA_PORTPHYSSTATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10623) case HLS_VERIFY_CAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10624) return IB_PORTPHYSSTATE_TRAINING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10625) case HLS_GOING_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10626) return IB_PORTPHYSSTATE_TRAINING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10627) case HLS_GOING_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10628) return OPA_PORTPHYSSTATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10629) case HLS_LINK_COOLDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10630) return OPA_PORTPHYSSTATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10631) case HLS_DN_DOWNDEF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10632) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10633) dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10634) ppd->host_link_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10635) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10639) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10640) * driver_lstate - convert the driver's notion of a port's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10641) * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10642) * (converted to a u32) to indicate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10644) u32 driver_lstate(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10646) if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10647) return IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10649) switch (ppd->host_link_state & HLS_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10650) case HLS_UP_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10651) return IB_PORT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10652) case HLS_UP_ARMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10653) return IB_PORT_ARMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10654) case HLS_UP_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10655) return IB_PORT_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10656) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10657) dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10658) ppd->host_link_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10659) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10663) void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10664) u8 neigh_reason, u8 rem_reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10666) if (ppd->local_link_down_reason.latest == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10667) ppd->neigh_link_down_reason.latest == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10668) ppd->local_link_down_reason.latest = lcl_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10669) ppd->neigh_link_down_reason.latest = neigh_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10670) ppd->remote_link_down_reason = rem_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10674) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10675) * data_vls_operational() - Verify that data VL BCT credits and MTU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10676) * are both set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10677) * @ppd: pointer to hfi1_pportdata structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10679) * Return: true - OK, false - otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10680) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10681) static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10683) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10684) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10686) if (!ppd->actual_vls_operational)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10687) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10688)
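	/*
	 * A VL is consistent only if its BCT credits and MTU are either
	 * both set or both clear; any mismatch means the data VLs are
	 * not operational.
	 */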
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10689) for (i = 0; i < ppd->vls_supported; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10690) reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10691) if ((reg && !ppd->dd->vld[i].mtu) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10692) (!reg && ppd->dd->vld[i].mtu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10693) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10696) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10700) * Change the physical and/or logical link state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10701) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10702) * Do not call this routine while inside an interrupt. It contains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10703) * calls to routines that can take multiple seconds to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10704) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10705) * Returns 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10707) int set_link_state(struct hfi1_pportdata *ppd, u32 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10709) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10710) struct ib_event event = {.device = NULL};
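	/*
	 * event.device stays NULL unless a PORT_ACTIVE event needs to be
	 * dispatched after hls_lock is dropped.
	 */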
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10711) int ret1, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10712) int orig_new_state, poll_bounce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10714) mutex_lock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10716) orig_new_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10717) if (state == HLS_DN_DOWNDEF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10718) state = HLS_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10720) /* interpret poll -> poll as a link bounce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10721) poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10722) state == HLS_DN_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10724) dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10725) link_state_name(ppd->host_link_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10726) link_state_name(orig_new_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10727) poll_bounce ? "(bounce) " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10728) link_state_reason_name(ppd, state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10731) * If we're going to a (HLS_*) link state that implies the logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10732) * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10733) * reset is_sm_config_started to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10735) if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10736) ppd->is_sm_config_started = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10738) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10739) * Do nothing if the states match. Let a poll to poll link bounce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10740) * go through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10742) if (ppd->host_link_state == state && !poll_bounce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10743) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10745) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10746) case HLS_UP_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10747) if (ppd->host_link_state == HLS_DN_POLL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10748) (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10750) * Quick link up jumps from polling to here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10751) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10752) * Whether in normal or loopback mode, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10753) * simulator jumps from polling to link up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10754) * Accept that here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10756) /* OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10757) } else if (ppd->host_link_state != HLS_GOING_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10758) goto unexpected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10762) * Wait for Link_Up physical state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10763) * Physical and Logical states should already have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10764) * transitioned to LinkUp and LinkInit, respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10766) ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10767) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10768) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10769) "%s: physical state did not change to LINK-UP\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10770) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10771) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10774) ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10775) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10776) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10777) "%s: logical state did not change to INIT\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10778) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10782) /* clear old transient LINKINIT_REASON code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10783) if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10784) ppd->linkinit_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10785) OPA_LINKINIT_REASON_LINKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10787) /* enable the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10788) add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10790) handle_linkup_change(dd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10791) pio_kernel_linkup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10793) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10794) * After link up, a new link width will have been set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10795) * Update the xmit counters with regards to the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10796) * link width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10798) update_xmit_counters(ppd, ppd->link_width_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10800) ppd->host_link_state = HLS_UP_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10801) update_statusp(ppd, IB_PORT_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10803) case HLS_UP_ARMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10804) if (ppd->host_link_state != HLS_UP_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10805) goto unexpected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10807) if (!data_vls_operational(ppd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10808) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10809) "%s: Invalid data VL credits or mtu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10810) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10811) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10815) set_logical_state(dd, LSTATE_ARMED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10816) ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10817) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10818) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10819) "%s: logical state did not change to ARMED\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10820) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10821) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10823) ppd->host_link_state = HLS_UP_ARMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10824) update_statusp(ppd, IB_PORT_ARMED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10825) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10826) * The simulator does not currently implement SMA messages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10827) * so neighbor_normal is not set. Set it here when we first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10828) * move to Armed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10830) if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10831) ppd->neighbor_normal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10833) case HLS_UP_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10834) if (ppd->host_link_state != HLS_UP_ARMED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10835) goto unexpected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10837) set_logical_state(dd, LSTATE_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10838) ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10839) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10840) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10841) "%s: logical state did not change to ACTIVE\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10842) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10843) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10844) /* tell all engines to go running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10845) sdma_all_running(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10846) ppd->host_link_state = HLS_UP_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10847) update_statusp(ppd, IB_PORT_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10849) /* Signal the IB layer that the port has gone active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10850) event.device = &dd->verbs_dev.rdi.ibdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10851) event.element.port_num = ppd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10852) event.event = IB_EVENT_PORT_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10854) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10855) case HLS_DN_POLL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10856) if ((ppd->host_link_state == HLS_DN_DISABLE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10857) ppd->host_link_state == HLS_DN_OFFLINE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10858) dd->dc_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10859) dc_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10860) /* Hand LED control to the DC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10861) write_csr(dd, DCC_CFG_LED_CNTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10863) if (ppd->host_link_state != HLS_DN_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10864) u8 tmp = ppd->link_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10866) ret = goto_offline(ppd, ppd->remote_link_down_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10867) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10868) ppd->link_enabled = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10871) ppd->remote_link_down_reason = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10873) if (ppd->driver_link_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10874) ppd->link_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10877) set_all_slowpath(ppd->dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10878) ret = set_local_link_attributes(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10879) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10882) ppd->port_error_action = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10884) if (quick_linkup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10885) /* quick linkup does not go into polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10886) ret = do_quick_linkup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10887) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10888) ret1 = set_physical_link_state(dd, PLS_POLLING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10889) if (!ret1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10890) ret1 = wait_phys_link_out_of_offline(ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10891) 3000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10892) if (ret1 != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10893) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10894) "Failed to transition to Polling link state, return 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10895) ret1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10896) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10901) * Change the host link state after requesting DC8051 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10902) * change its physical state so that we can ignore any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10903) * interrupt with stale LNI(XX) error, which will not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10904) * cleared until DC8051 transitions to Polling state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10906) ppd->host_link_state = HLS_DN_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10907) ppd->offline_disabled_reason =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10908) HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10910) * If an error occurred above, go back to offline. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10911) * caller may reschedule another attempt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10913) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10914) goto_offline(ppd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10915) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10916) log_physical_state(ppd, PLS_POLLING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10918) case HLS_DN_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10919) /* link is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10920) ppd->link_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10922) /* allow any state to transition to disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10924) /* must transition to offline first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10925) if (ppd->host_link_state != HLS_DN_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10926) ret = goto_offline(ppd, ppd->remote_link_down_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10927) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10929) ppd->remote_link_down_reason = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10932) if (!dd->dc_shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10933) ret1 = set_physical_link_state(dd, PLS_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10934) if (ret1 != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10935) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10936) "Failed to transition to Disabled link state, return 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10937) ret1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10938) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10941) ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10942) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10943) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10944) "%s: physical state did not change to DISABLED\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10945) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10946) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10948) dc_shutdown(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10950) ppd->host_link_state = HLS_DN_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10952) case HLS_DN_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10953) if (ppd->host_link_state == HLS_DN_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10954) dc_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10956) /* allow any state to transition to offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10957) ret = goto_offline(ppd, ppd->remote_link_down_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10958) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10959) ppd->remote_link_down_reason = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10961) case HLS_VERIFY_CAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10962) if (ppd->host_link_state != HLS_DN_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10963) goto unexpected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10964) ppd->host_link_state = HLS_VERIFY_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10965) log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10967) case HLS_GOING_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10968) if (ppd->host_link_state != HLS_VERIFY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10969) goto unexpected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10971) ret1 = set_physical_link_state(dd, PLS_LINKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10972) if (ret1 != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10973) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10974) "Failed to transition to link up state, return 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10975) ret1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10976) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10979) ppd->host_link_state = HLS_GOING_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10982) case HLS_GOING_OFFLINE: /* transient within goto_offline() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10983) case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10984) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10985) dd_dev_info(dd, "%s: state 0x%x: not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10986) __func__, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10987) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10988) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10991) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10993) unexpected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10994) dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10995) __func__, link_state_name(ppd->host_link_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10996) link_state_name(state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10997) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10999) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11000) mutex_unlock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11002) if (event.device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11003) ib_dispatch_event(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11005) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11008) int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11010) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11011) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11013) switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11014) case HFI1_IB_CFG_LIDLMC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11015) set_lidlmc(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11017) case HFI1_IB_CFG_VL_HIGH_LIMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11019) * The VL Arbitrator high limit is sent in units of 4k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11020) * bytes, while HFI stores it in units of 64 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11021) */
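		/* e.g. val = 2 (two 4k-byte units) becomes 128 64-byte units */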
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11022) val *= 4096 / 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11023) reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11024) << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11025) write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11026) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11027) case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11028) /* HFI only supports POLL as the default link down state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11029) if (val != HLS_DN_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11030) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11032) case HFI1_IB_CFG_OP_VLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11033) if (ppd->vls_operational != val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11034) ppd->vls_operational = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11035) if (!ppd->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11036) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11038) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11040) * For link width, link width downgrade, and speed enable, always AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11041) * the setting with what is actually supported. This has two benefits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11042) * First, enabled can't have unsupported values, no matter what the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11043) * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11044) * "fill in with your supported value" have all the bits in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11045) * field set, so simply ANDing with supported has the desired result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11047) case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11048) ppd->link_width_enabled = val & ppd->link_width_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11050) case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11051) ppd->link_width_downgrade_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11052) val & ppd->link_width_downgrade_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11053) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11054) case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11055) ppd->link_speed_enabled = val & ppd->link_speed_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11056) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11057) case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11059) * HFI does not follow IB specs; save this value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11060) * so we can report it if asked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11062) ppd->overrun_threshold = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11064) case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11066) * HFI does not follow IB specs; save this value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11067) * so we can report it if asked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11068) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11069) ppd->phy_error_threshold = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11072) case HFI1_IB_CFG_MTU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11073) set_send_length(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11076) case HFI1_IB_CFG_PKEYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11077) if (HFI1_CAP_IS_KSET(PKEY_CHECK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11078) set_partition_keys(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11081) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11082) if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11083) dd_dev_info(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11084) "%s: which %s, val 0x%x: not implemented\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11085) __func__, ib_cfg_name(which), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11086) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11091) /* begin functions related to vl arbitration table caching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11092) static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11094) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11096) BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11097) VL_ARB_LOW_PRIO_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11098) BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11099) VL_ARB_HIGH_PRIO_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11102) * Note that we always return values directly from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11103) * 'vl_arb_cache' (and do no CSR reads) in response to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11104) * 'Get(VLArbTable)'. This is obviously correct after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11105) * 'Set(VLArbTable)', since the cache will then be up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11106) * date. But it's also correct prior to any 'Set(VLArbTable)'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11107) * since then both the cache, and the relevant h/w registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11108) * will be zeroed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11111) for (i = 0; i < MAX_PRIO_TABLE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11112) spin_lock_init(&ppd->vl_arb_cache[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11116) * vl_arb_lock_cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11118) * All other vl_arb_* functions should be called only after locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11119) * the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11121) static inline struct vl_arb_cache *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11122) vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11124) if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11125) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11126) spin_lock(&ppd->vl_arb_cache[idx].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11127) return &ppd->vl_arb_cache[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11130) static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11132) spin_unlock(&ppd->vl_arb_cache[idx].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11135) static void vl_arb_get_cache(struct vl_arb_cache *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11136) struct ib_vl_weight_elem *vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11138) memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11141) static void vl_arb_set_cache(struct vl_arb_cache *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11142) struct ib_vl_weight_elem *vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11144) memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11147) static int vl_arb_match_cache(struct vl_arb_cache *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11148) struct ib_vl_weight_elem *vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11150) return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11151) }
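
/*
 * Hypothetical sketch of a typical caller of the cache helpers above:
 *
 *	struct vl_arb_cache *cache;
 *
 *	cache = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	if (cache) {
 *		vl_arb_get_cache(cache, vl);
 *		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 *	}
 */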
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11153) /* end functions related to vl arbitration table caching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11155) static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11156) u32 size, struct ib_vl_weight_elem *vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11158) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11159) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11160) unsigned int i, is_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11161) int drain, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11163) mutex_lock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11165) if (ppd->host_link_state & HLS_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11166) is_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11168) drain = !is_ax(dd) && is_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11170) if (drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11172) * Before adjusting VL arbitration weights, empty per-VL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11173) * FIFOs, otherwise a packet whose VL weight is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11174) * set to 0 could get stuck in a FIFO with no chance to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11175) * egress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11177) ret = stop_drain_data_vls(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11179) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11180) dd_dev_err(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11181) dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11182) "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11183) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11184) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11187) for (i = 0; i < size; i++, vl++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11189) * NOTE: The low priority shift and mask are used here, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11190) * they are the same for both the low and high registers.
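		 * For example, { .vl = 3, .weight = 16 } packs 3 into the
		 * VL field and 16 into the WEIGHT field of one 8-byte entry.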
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11192) reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11193) << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11194) | (((u64)vl->weight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11195) & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11196) << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11197) write_csr(dd, target + (i * 8), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11199) pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11201) if (drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11202) open_fill_data_vls(dd); /* reopen all VLs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11204) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11205) mutex_unlock(&ppd->hls_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11207) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11211) * Read one credit merge VL register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11213) static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11214) struct vl_limit *vll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11216) u64 reg = read_csr(dd, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11218) vll->dedicated = cpu_to_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11219) (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11220) & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11221) vll->shared = cpu_to_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11222) (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11223) & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11227) * Read the current credit merge limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11229) static int get_buffer_control(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11230) struct buffer_control *bc, u16 *overall_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11232) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11233) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235) /* not all entries are filled in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11236) memset(bc, 0, sizeof(*bc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11238) /* OPA and HFI have a 1-1 mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11239) for (i = 0; i < TXE_NUM_DATA_VL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11240) read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11242) /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11243) read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11245) reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11246) bc->overall_shared_limit = cpu_to_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11247) (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11248) & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11249) if (overall_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11250) *overall_limit = (reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11251) >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11252) & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11253) return sizeof(struct buffer_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11256) static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11258) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11259) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11261) /* each register contains 16 SC->VLnt mappings, 4 bits each */
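	/*
	 * NOTE: the byte-by-byte walk below assumes a little-endian host,
	 * so that byte i of the register holds the mappings for SC 2*i
	 * (low nibble) and SC 2*i + 1 (high nibble).
	 */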
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11262) reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11263) for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11266) dp->vlnt[2 * i] = byte & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11267) dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11270) reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11271) for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11274) dp->vlnt[16 + (2 * i)] = byte & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11275) dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11277) return sizeof(struct sc2vlnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11280) static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11281) struct ib_vl_weight_elem *vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11283) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11285) for (i = 0; i < nelems; i++, vl++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11286) vl->vl = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11287) vl->weight = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11291) static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11293) write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11294) DC_SC_VL_VAL(15_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11295) 0, dp->vlnt[0] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11296) 1, dp->vlnt[1] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11297) 2, dp->vlnt[2] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11298) 3, dp->vlnt[3] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11299) 4, dp->vlnt[4] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11300) 5, dp->vlnt[5] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11301) 6, dp->vlnt[6] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11302) 7, dp->vlnt[7] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11303) 8, dp->vlnt[8] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11304) 9, dp->vlnt[9] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11305) 10, dp->vlnt[10] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11306) 11, dp->vlnt[11] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11307) 12, dp->vlnt[12] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11308) 13, dp->vlnt[13] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11309) 14, dp->vlnt[14] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11310) 15, dp->vlnt[15] & 0xf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11311) write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11312) DC_SC_VL_VAL(31_16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11313) 16, dp->vlnt[16] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11314) 17, dp->vlnt[17] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11315) 18, dp->vlnt[18] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11316) 19, dp->vlnt[19] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11317) 20, dp->vlnt[20] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11318) 21, dp->vlnt[21] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11319) 22, dp->vlnt[22] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11320) 23, dp->vlnt[23] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11321) 24, dp->vlnt[24] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11322) 25, dp->vlnt[25] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11323) 26, dp->vlnt[26] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11324) 27, dp->vlnt[27] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11325) 28, dp->vlnt[28] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11326) 29, dp->vlnt[29] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11327) 30, dp->vlnt[30] & 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11328) 31, dp->vlnt[31] & 0xf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11331) static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11332) u16 limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11334) if (limit != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11335) dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11336) what, (int)limit, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11338)
/* change only the shared limit portion of SendCmGlobalCredit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11340) static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11342) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11344) reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11345) reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11346) reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11347) write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11349)
/* change only the total credit limit portion of SendCmGlobalCredit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11351) static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11353) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11355) reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11356) reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11357) reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11358) write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11361) /* set the given per-VL shared limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11362) static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11364) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11365) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11367) if (vl < TXE_NUM_DATA_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11368) addr = SEND_CM_CREDIT_VL + (8 * vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11369) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11370) addr = SEND_CM_CREDIT_VL15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11372) reg = read_csr(dd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11373) reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11374) reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11375) write_csr(dd, addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11378) /* set the given per-VL dedicated limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11379) static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11381) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11382) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11384) if (vl < TXE_NUM_DATA_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11385) addr = SEND_CM_CREDIT_VL + (8 * vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11386) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11387) addr = SEND_CM_CREDIT_VL15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11389) reg = read_csr(dd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11390) reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11391) reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11392) write_csr(dd, addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11395) /* spin until the given per-VL status mask bits clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11396) static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11397) const char *which)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11399) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11400) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11402) timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11403) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11404) reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11406) if (reg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11407) return; /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11408) if (time_after(jiffies, timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11409) break; /* timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11410) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11413) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11414) "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11415) which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11416) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11417) * If this occurs, it is likely there was a credit loss on the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11418) * The only recovery from that is a link bounce.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11420) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11421) "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11425) * The number of credits on the VLs may be changed while everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11426) * is "live", but the following algorithm must be followed due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11427) * how the hardware is actually implemented. In particular,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11428) * Return_Credit_Status[] is the only correct status check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11429) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11430) * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11431) * set Global_Shared_Credit_Limit = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11432) * use_all_vl = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11433) * mask0 = all VLs that are changing either dedicated or shared limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11434) * set Shared_Limit[mask0] = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11435) * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11436) * if (changing any dedicated limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11437) * mask1 = all VLs that are lowering dedicated limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11438) * lower Dedicated_Limit[mask1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11439) * spin until Return_Credit_Status[mask1] == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11440) * raise Dedicated_Limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11441) * raise Shared_Limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442) * raise Global_Shared_Credit_Limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11443) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11444) * lower = if the new limit is lower, set the limit to the new value
 * raise = if the new limit is higher than the current value (which may
 *	have been changed earlier in the algorithm), set the limit to the
 *	new value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11447) */
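/*
 * Worked example (hypothetical values): to lower VL0's dedicated limit
 * from 100 to 50 while raising VL1's from 50 to 100, the code below
 * zeroes the shared limits of the changing VLs, waits for their
 * Return_Credit_Status bits to clear, lowers VL0 to 50, waits again,
 * then raises VL1 to 100 and finally restores the shared and global
 * shared limits.
 */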
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11448) int set_buffer_control(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11449) struct buffer_control *new_bc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11451) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11452) u64 changing_mask, ld_mask, stat_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11453) int change_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11454) int i, use_all_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11455) int this_shared_changing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11456) int vl_count = 0, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11457) /*
	/*
	 * A0: the variable any_shared_limit_changing below, and its use in
	 * the algorithm above, exist only to support A0 hardware. Both can
	 * be removed when A0 support is dropped.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11461) int any_shared_limit_changing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11462) struct buffer_control cur_bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11463) u8 changing[OPA_MAX_VLS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11464) u8 lowering_dedicated[OPA_MAX_VLS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11465) u16 cur_total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11466) u32 new_total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11467) const u64 all_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11468) SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11469) | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11470) | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11471) | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11472) | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11473) | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11474) | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11475) | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11476) | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11478) #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11479) #define NUM_USABLE_VLS 16 /* look at VL15 and less */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11481) /* find the new total credits, do sanity check on unused VLs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11482) for (i = 0; i < OPA_MAX_VLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11483) if (valid_vl(i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11484) new_total += be16_to_cpu(new_bc->vl[i].dedicated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11485) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11487) nonzero_msg(dd, i, "dedicated",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11488) be16_to_cpu(new_bc->vl[i].dedicated));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11489) nonzero_msg(dd, i, "shared",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11490) be16_to_cpu(new_bc->vl[i].shared));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11491) new_bc->vl[i].dedicated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11492) new_bc->vl[i].shared = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11494) new_total += be16_to_cpu(new_bc->overall_shared_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11496) /* fetch the current values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11497) get_buffer_control(dd, &cur_bc, &cur_total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11500) * Create the masks we will use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11502) memset(changing, 0, sizeof(changing));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11503) memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11505) * NOTE: Assumes that the individual VL bits are adjacent and in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11506) * increasing order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11508) stat_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11509) SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11510) changing_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11511) ld_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11512) change_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11513) any_shared_limit_changing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11514) for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11515) if (!valid_vl(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11516) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11517) this_shared_changing = new_bc->vl[i].shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11518) != cur_bc.vl[i].shared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11519) if (this_shared_changing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11520) any_shared_limit_changing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11521) if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11522) this_shared_changing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11523) changing[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11524) changing_mask |= stat_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11525) change_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11527) if (be16_to_cpu(new_bc->vl[i].dedicated) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11528) be16_to_cpu(cur_bc.vl[i].dedicated)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11529) lowering_dedicated[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11530) ld_mask |= stat_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11534) /* bracket the credit change with a total adjustment */
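	/*
	 * Raising the total first here (and lowering it only at the end,
	 * below) presumably keeps the programmed total from dropping below
	 * the sum of the per-VL limits while the change is in progress.
	 */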
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11535) if (new_total > cur_total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11536) set_global_limit(dd, new_total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11539) * Start the credit change algorithm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11541) use_all_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11542) if ((be16_to_cpu(new_bc->overall_shared_limit) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11543) be16_to_cpu(cur_bc.overall_shared_limit)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11544) (is_ax(dd) && any_shared_limit_changing)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11545) set_global_shared(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11546) cur_bc.overall_shared_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11547) use_all_mask = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11550) for (i = 0; i < NUM_USABLE_VLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11551) if (!valid_vl(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11552) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11554) if (changing[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11555) set_vl_shared(dd, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11556) cur_bc.vl[i].shared = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11560) wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11561) "shared");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11563) if (change_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11564) for (i = 0; i < NUM_USABLE_VLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11565) if (!valid_vl(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11566) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11568) if (lowering_dedicated[i]) {
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->vl[i].dedicated));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11572) cur_bc.vl[i].dedicated =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11573) new_bc->vl[i].dedicated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11577) wait_for_vl_status_clear(dd, ld_mask, "dedicated");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11579) /* now raise all dedicated that are going up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11580) for (i = 0; i < NUM_USABLE_VLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11581) if (!valid_vl(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11582) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11584) if (be16_to_cpu(new_bc->vl[i].dedicated) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11585) be16_to_cpu(cur_bc.vl[i].dedicated))
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->vl[i].dedicated));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11592) /* next raise all shared that are going up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11593) for (i = 0; i < NUM_USABLE_VLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11594) if (!valid_vl(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11595) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11597) if (be16_to_cpu(new_bc->vl[i].shared) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11598) be16_to_cpu(cur_bc.vl[i].shared))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11599) set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11602) /* finally raise the global shared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11603) if (be16_to_cpu(new_bc->overall_shared_limit) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11604) be16_to_cpu(cur_bc.overall_shared_limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11605) set_global_shared(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11606) be16_to_cpu(new_bc->overall_shared_limit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11608) /* bracket the credit change with a total adjustment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11609) if (new_total < cur_total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11610) set_global_limit(dd, new_total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11612) /*
	 * Determine the actual number of operational VLs using the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11614) * dedicated and shared credits for each VL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11616) if (change_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11617) for (i = 0; i < TXE_NUM_DATA_VL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11618) if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11619) be16_to_cpu(new_bc->vl[i].shared) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11620) vl_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11621) ppd->actual_vls_operational = vl_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11622) ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11623) ppd->actual_vls_operational :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11624) ppd->vls_operational,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11625) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11626) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11627) ret = pio_map_init(dd, ppd->port - 1, vl_count ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11628) ppd->actual_vls_operational :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11629) ppd->vls_operational, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11630) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11631) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11633) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11636) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11637) * Read the given fabric manager table. Return the size of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11638) * table (in bytes) on success, and a negative error code on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11639) * failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11640) */
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11644) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11645) struct vl_arb_cache *vlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11647) switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11648) case FM_TBL_VL_HIGH_ARB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11649) size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11651) * OPA specifies 128 elements (of 2 bytes each), though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11652) * HFI supports only 16 elements in h/w.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11654) vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11655) vl_arb_get_cache(vlc, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11656) vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11658) case FM_TBL_VL_LOW_ARB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11659) size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11661) * OPA specifies 128 elements (of 2 bytes each), though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11662) * HFI supports only 16 elements in h/w.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11664) vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11665) vl_arb_get_cache(vlc, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11666) vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11668) case FM_TBL_BUFFER_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11669) size = get_buffer_control(ppd->dd, t, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11671) case FM_TBL_SC2VLNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11672) size = get_sc2vlnt(ppd->dd, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11674) case FM_TBL_VL_PREEMPT_ELEMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11675) size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11676) /* OPA specifies 128 elements, of 2 bytes each */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11677) get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11678) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11679) case FM_TBL_VL_PREEMPT_MATRIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11680) size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11681) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11682) * OPA specifies that this is the same size as the VL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683) * arbitration tables (i.e., 256 bytes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11686) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11687) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11689) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11692) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11693) * Write the given fabric manager table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11695) int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11697) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11698) struct vl_arb_cache *vlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11700) switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11701) case FM_TBL_VL_HIGH_ARB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11702) vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11703) if (vl_arb_match_cache(vlc, t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11704) vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11707) vl_arb_set_cache(vlc, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11708) vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11709) ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11710) VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11712) case FM_TBL_VL_LOW_ARB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11713) vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11714) if (vl_arb_match_cache(vlc, t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11715) vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11718) vl_arb_set_cache(vlc, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11719) vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11720) ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11721) VL_ARB_LOW_PRIO_TABLE_SIZE, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11722) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11723) case FM_TBL_BUFFER_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11724) ret = set_buffer_control(ppd, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11725) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11726) case FM_TBL_SC2VLNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11727) set_sc2vlnt(ppd->dd, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11728) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11729) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11730) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11732) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11736) * Disable all data VLs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11737) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11738) * Return 0 if disabled, non-zero if the VLs cannot be disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11740) static int disable_data_vls(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11741) {
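	/*
	 * A-step silicon cannot have its data VLs disabled; return non-zero
	 * so the caller knows the VLs remain enabled.
	 */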
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11742) if (is_ax(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11743) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11745) pio_send_control(dd, PSC_DATA_VL_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11747) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11751) * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11752) * Just re-enables all data VLs (the "fill" part happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11753) * automatically - the name was chosen for symmetry with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11754) * stop_drain_data_vls()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11755) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11756) * Return 0 if successful, non-zero if the VLs cannot be enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11758) int open_fill_data_vls(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11760) if (is_ax(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11761) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11763) pio_send_control(dd, PSC_DATA_VL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11768) /*
 * drain_data_vls() - assumes that disable_data_vls() has been called;
 * waits for the occupancy of the per-VL FIFOs of all contexts, and of
 * the SDMA engines, to drop to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11773) static void drain_data_vls(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11775) sc_wait(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11776) sdma_wait(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11777) pause_for_credit_return(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11781) * stop_drain_data_vls() - disable, then drain all per-VL fifos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11783) * Use open_fill_data_vls() to resume using data VLs. This pair is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11784) * meant to be used like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11785) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11786) * stop_drain_data_vls(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11787) * // do things with per-VL resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11788) * open_fill_data_vls(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11789) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11790) int stop_drain_data_vls(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11792) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11794) ret = disable_data_vls(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11795) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11796) drain_data_vls(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11798) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11802) * Convert a nanosecond time to a cclock count. No matter how slow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11803) * the cclock, a non-zero ns will always have a non-zero result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11805) u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11807) u32 cclocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11809) if (dd->icode == ICODE_FPGA_EMULATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11810) cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11811) else /* simulation pretends to be ASIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11812) cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11813) if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11814) cclocks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11815) return cclocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11818) /*
 * Convert a cclock count to nanoseconds. No matter how slow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11820) * the cclock, a non-zero cclocks will always have a non-zero result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11822) u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11824) u32 ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11826) if (dd->icode == ICODE_FPGA_EMULATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11827) ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11828) else /* simulation pretends to be ASIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11829) ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11830) if (cclocks && !ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11831) ns = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11832) return ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11833) }
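/*
 * NOTE: both conversions above use integer division, so converting a
 * value through ns_to_cclock() and back through cclock_to_ns() (or vice
 * versa) is not guaranteed to return the original value; only the
 * non-zero-in, non-zero-out property is preserved.
 */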
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11836) * Dynamically adjust the receive interrupt timeout for a context based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11837) * incoming packet rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11838) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11839) * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11841) static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11843) struct hfi1_devdata *dd = rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11844) u32 timeout = rcd->rcvavail_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11846) /*
	 * This algorithm halves or doubles the timeout depending on whether
	 * the number of packets received in this interrupt was less than,
	 * or greater than or equal to, the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints is it possible to have an unchanging
	 * timeout.
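	 *
	 * For example, with a hypothetical rcv_intr_count of 16: an
	 * interrupt that handled 10 packets halves the timeout, while one
	 * that handled 20 packets doubles it (capped at
	 * dd->rcv_intr_timeout_csr).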
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11855) if (npkts < rcv_intr_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11857) * Not enough packets arrived before the timeout, adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11858) * timeout downward.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11860) if (timeout < 2) /* already at minimum? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11861) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11862) timeout >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11863) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11865) * More than enough packets arrived before the timeout, adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11866) * timeout upward.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11868) if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11869) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11870) timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11873) rcd->rcvavail_timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11875) * timeout cannot be larger than rcv_intr_timeout_csr which has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11876) * been verified to be in range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11878) write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11879) (u64)timeout <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11880) RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11883) void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11884) u32 intr_adjust, u32 npkts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11886) struct hfi1_devdata *dd = rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11887) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11888) u32 ctxt = rcd->ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11891) * Need to write timeout register before updating RcvHdrHead to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11892) * that a new value is used when the HW decides to restart counting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11894) if (intr_adjust)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11895) adjust_rcv_timeout(rcd, npkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11896) if (updegr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11897) reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11898) << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11899) write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11901) reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11902) (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11903) << RCV_HDR_HEAD_HEAD_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11904) write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11907) u32 hdrqempty(struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11909) u32 head, tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11911) head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11912) & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11913)
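	/*
	 * Prefer the DMA'ed copy of the tail when one has been mapped;
	 * otherwise fall back to reading the tail CSR directly.
	 */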
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11914) if (hfi1_rcvhdrtail_kvaddr(rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11915) tail = get_rcvhdrtail(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11916) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11917) tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11919) return head == tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11923) * Context Control and Receive Array encoding for buffer size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11924) * 0x0 invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11925) * 0x1 4 KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11926) * 0x2 8 KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11927) * 0x3 16 KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11928) * 0x4 32 KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11929) * 0x5 64 KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11930) * 0x6 128 KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11931) * 0x7 256 KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11932) * 0x8 512 KB (Receive Array only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11933) * 0x9 1 MB (Receive Array only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11934) * 0xa 2 MB (Receive Array only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11935) *
 * 0xb-0xf reserved (Receive Array only)
 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11939) * This routine assumes that the value has already been sanity checked.
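 *
 * For the valid sizes, the encoding is equivalently ilog2(size) - 11
 * (4 KB == 2^12 -> 0x1, ..., 2 MB == 2^21 -> 0xa); the explicit switch
 * below is kept for readability.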
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11941) static u32 encoded_size(u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11943) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11944) case 4 * 1024: return 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11945) case 8 * 1024: return 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11946) case 16 * 1024: return 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11947) case 32 * 1024: return 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11948) case 64 * 1024: return 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11949) case 128 * 1024: return 0x6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11950) case 256 * 1024: return 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11951) case 512 * 1024: return 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11952) case 1 * 1024 * 1024: return 0x9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11953) case 2 * 1024 * 1024: return 0xa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11955) return 0x1; /* if invalid, go with the minimum size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11958) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11959) * encode_rcv_header_entry_size - return chip specific encoding for size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11960) * @size: size in dwords
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11961) *
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return zero if the given size is invalid, otherwise the encoding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11966) u8 encode_rcv_header_entry_size(u8 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11968) /* there are only 3 valid receive header entry sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11969) if (size == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11970) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11971) if (size == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11972) return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11973) if (size == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11974) return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11975) return 0; /* invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11978) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11979) * hfi1_validate_rcvhdrcnt - validate hdrcnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11980) * @dd: the device data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11981) * @thecnt: the header count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11983) int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11985) if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11986) dd_dev_err(dd, "Receive header queue count too small\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11987) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11990) if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11991) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11992) "Receive header queue count cannot be greater than %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11993) HFI1_MAX_HDRQ_EGRBUF_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11994) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11997) if (thecnt % HDRQ_INCREMENT) {
		dd_dev_err(dd, "Receive header queue count %u must be divisible by %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11999) thecnt, HDRQ_INCREMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12000) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12003) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12006) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12007) * set_hdrq_regs - set header queue registers for context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12008) * @dd: the device data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12009) * @ctxt: the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12010) * @entsize: the dword entry size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12011) * @hdrcnt: the number of header entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12013) void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12015) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12017) reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12018) RCV_HDR_CNT_CNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12019) write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12020) reg = ((u64)encode_rcv_header_entry_size(entsize) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12021) RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12022) RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12023) write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12024) reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12025) RCV_HDR_SIZE_HDR_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12026) write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12029) * Program dummy tail address for every receive context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12030) * before enabling any receive context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12032) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12033) dd->rcvhdrtail_dummy_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12034) }
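
/*
 * Illustrative note: each CSR write above follows the same packing
 * pattern, (value & FIELD_MASK) << FIELD_SHIFT, with the header count
 * pre-scaled by HDRQ_SIZE_SHIFT because the hardware takes the count in
 * fixed increments (hence the HDRQ_INCREMENT divisibility check in
 * hfi1_validate_rcvhdrcnt() above).
 */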
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12036) void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12037) struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12039) u64 rcvctrl, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12040) int did_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12041) u16 ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12043) if (!rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12044) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12046) ctxt = rcd->ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12048) hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12050) rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12051) /* if the context is already enabled, don't do the extra steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12052) if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12053) !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12054) /* reset the tail and hdr addresses, and sequence count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12055) write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12056) rcd->rcvhdrq_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12057) if (hfi1_rcvhdrtail_kvaddr(rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12058) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12059) rcd->rcvhdrqtailaddr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12060) hfi1_set_seq_cnt(rcd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12062) /* reset the cached receive header queue head value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12063) hfi1_set_rcd_head(rcd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12066) * Zero the receive header queue so we don't get false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12067) * positives when checking the sequence number: the sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12068) * numbers could otherwise land exactly on the same spot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12069) * e.g. on an rcd restart before the receive header queue wrapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12071) memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12073) /* starting timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12074) rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12076) /* enable the context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12077) rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12079) /* clean the egr buffer size first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12080) rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12081) rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12082) & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12083) << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12085) /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12086) write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12087) did_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12089) /* zero RcvEgrIndexHead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12090) write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12092) /* set eager count and base index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12093) reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12094) & RCV_EGR_CTRL_EGR_CNT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12095) << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12096) (((rcd->eager_base >> RCV_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12097) & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12098) << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12099) write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12102) * Set TID (expected) count and base index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12103) * rcd->expected_count is set to individual RcvArray entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12104) * not pairs, and the CSR takes a pair-count in groups of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12105) * four, so divide by 8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12106) */
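		/*
		 * Worked example (illustrative): expected_count = 2048
		 * entries is 1024 pairs, i.e. 256 groups of four pairs,
		 * so 2048 >> RCV_SHIFT (/ 8) = 256 goes into the
		 * pair-count field.
		 */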
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12107) reg = (((rcd->expected_count >> RCV_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12108) & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12109) << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12110) (((rcd->expected_base >> RCV_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12111) & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12112) << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12113) write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12114) if (ctxt == HFI1_CTRL_CTXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12115) write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12117) if (op & HFI1_RCVCTRL_CTXT_DIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12118) write_csr(dd, RCV_VL15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12120) * When receive context is being disabled turn on tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12121) * update with a dummy tail address and then disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12122) * receive context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12124) if (dd->rcvhdrtail_dummy_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12125) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12126) dd->rcvhdrtail_dummy_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12127) /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12128) rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12131) rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12133) if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12134) set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12135) IS_RCVAVAIL_START + rcd->ctxt, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12136) rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12138) if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12139) set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12140) IS_RCVAVAIL_START + rcd->ctxt, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12141) rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12143) if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12144) rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12145) if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12146) /* See comment on RcvCtxtCtrl.TailUpd above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12147) if (!(op & HFI1_RCVCTRL_CTXT_DIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12148) rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12150) if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12151) rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12152) if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12153) rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12154) if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12155) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12156) * In one-packet-per-eager mode, the size comes from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12157) * the RcvArray entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12159) rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12160) rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12162) if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12163) rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12164) if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12165) rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12166) if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12167) rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12168) if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12169) rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12170) if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12171) rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12172) if (op & HFI1_RCVCTRL_URGENT_ENB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12173) set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12174) IS_RCVURGENT_START + rcd->ctxt, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12175) if (op & HFI1_RCVCTRL_URGENT_DIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12176) set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12177) IS_RCVURGENT_START + rcd->ctxt, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12179) hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12180) write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12182) /* work around sticky RcvCtxtStatus.BlockedRHQFull */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12183) if (did_enable &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12184) (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12185) reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12186) if (reg != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12187) dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12188) ctxt, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12189) read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12190) write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12191) write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12192) read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12193) reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12194) dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12195) ctxt, reg, reg == 0 ? "not" : "still");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12199) if (did_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12201) * The interrupt timeout and count must be set after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12202) * the context is enabled to take effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12204) /* set interrupt timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12205) write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12206) (u64)rcd->rcvavail_timeout <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12207) RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12209) /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12210) reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12211) write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12214) if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12216) * If the context has been disabled and the Tail Update has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12217) * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12218) * address so it is never left pointing at an invalid address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12220) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12221) dd->rcvhdrtail_dummy_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12224) u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12226) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12227) u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12229) if (namep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12230) ret = dd->cntrnameslen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12231) *namep = dd->cntrnames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12232) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12233) const struct cntr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12234) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12236) ret = dd->ndevcntrs * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12238) /* Get the start of the block of counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12239) *cntrp = dd->cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12242) * Now go and fill in each counter in the block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12244) for (i = 0; i < DEV_CNTR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12245) entry = &dev_cntrs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12246) hfi1_cdbg(CNTR, "reading %s", entry->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12247) if (entry->flags & CNTR_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12248) /* Nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12249) hfi1_cdbg(CNTR, "\tDisabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12250) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12251) if (entry->flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12252) hfi1_cdbg(CNTR, "\tPer VL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12253) for (j = 0; j < C_VL_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12254) val = entry->rw_cntr(entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12255) dd, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12256) CNTR_MODE_R,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12257) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12258) hfi1_cdbg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12259) CNTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12260) "\t\tRead 0x%llx for %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12261) val, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12262) dd->cntrs[entry->offset + j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12263) val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12265) } else if (entry->flags & CNTR_SDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12266) hfi1_cdbg(CNTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12267) "\tPer SDMA Engine\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12268) for (j = 0; j < chip_sdma_engines(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12269) j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12270) val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12271) entry->rw_cntr(entry, dd, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12272) CNTR_MODE_R, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12273) hfi1_cdbg(CNTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12274) "\t\tRead 0x%llx for %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12275) val, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12276) dd->cntrs[entry->offset + j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12277) val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12279) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12280) val = entry->rw_cntr(entry, dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12281) CNTR_INVALID_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12282) CNTR_MODE_R, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12283) dd->cntrs[entry->offset] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12284) hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12289) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12290) }
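
/*
 * Illustrative sketch only, not part of the driver: hfi1_read_cntrs()
 * implements a two-call protocol.  Passing namep returns the
 * newline-separated name block; passing cntrp instead fills in and
 * returns the size of the matching value block.  A hypothetical
 * consumer:
 */
#if 0	/* example, not compiled */
static void example_dump_dev_cntrs(struct hfi1_devdata *dd)
{
	char *names;
	u64 *vals;
	u32 name_len, val_len;

	name_len = hfi1_read_cntrs(dd, &names, NULL);	/* names only */
	val_len = hfi1_read_cntrs(dd, NULL, &vals);	/* values only */
	/*
	 * names is name_len bytes of "name[,32]\n" entries and vals
	 * holds val_len / sizeof(u64) counters in the same order.
	 */
}
#endif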
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12293) * Used by sysfs to expose the port counter names and values for hfi1 stats to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12295) u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12297) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12298) u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12300) if (namep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12301) ret = ppd->dd->portcntrnameslen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12302) *namep = ppd->dd->portcntrnames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12303) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12304) const struct cntr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12305) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12307) ret = ppd->dd->nportcntrs * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12308) *cntrp = ppd->cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12310) for (i = 0; i < PORT_CNTR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12311) entry = &port_cntrs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12312) hfi1_cdbg(CNTR, "reading %s", entry->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12313) if (entry->flags & CNTR_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12314) /* Nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12315) hfi1_cdbg(CNTR, "\tDisabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12316) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12319) if (entry->flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12320) hfi1_cdbg(CNTR, "\tPer VL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12321) for (j = 0; j < C_VL_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12322) val = entry->rw_cntr(entry, ppd, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12323) CNTR_MODE_R,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12324) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12325) hfi1_cdbg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12326) CNTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12327) "\t\tRead 0x%llx for %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12328) val, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12329) ppd->cntrs[entry->offset + j] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12331) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12332) val = entry->rw_cntr(entry, ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12333) CNTR_INVALID_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12334) CNTR_MODE_R,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12335) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12336) ppd->cntrs[entry->offset] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12337) hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12341) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12344) static void free_cntrs(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12346) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12347) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12349) if (dd->synth_stats_timer.function)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12350) del_timer_sync(&dd->synth_stats_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12351) ppd = (struct hfi1_pportdata *)(dd + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12352) for (i = 0; i < dd->num_pports; i++, ppd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12353) kfree(ppd->cntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12354) kfree(ppd->scntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12355) free_percpu(ppd->ibport_data.rvp.rc_acks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12356) free_percpu(ppd->ibport_data.rvp.rc_qacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12357) free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12358) ppd->cntrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12359) ppd->scntrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12360) ppd->ibport_data.rvp.rc_acks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12361) ppd->ibport_data.rvp.rc_qacks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12362) ppd->ibport_data.rvp.rc_delayed_comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12364) kfree(dd->portcntrnames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12365) dd->portcntrnames = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12366) kfree(dd->cntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12367) dd->cntrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12368) kfree(dd->scntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12369) dd->scntrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12370) kfree(dd->cntrnames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12371) dd->cntrnames = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12372) if (dd->update_cntr_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12373) destroy_workqueue(dd->update_cntr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12374) dd->update_cntr_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12378) static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12379) u64 *psval, void *context, int vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12381) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12382) u64 sval = *psval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12384) if (entry->flags & CNTR_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12385) dd_dev_err(dd, "Counter %s not enabled", entry->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12386) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12389) hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12391) val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12393) /* If it's a synthetic counter there is more work we need to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12394) if (entry->flags & CNTR_SYNTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12395) if (sval == CNTR_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12396) /* No need to read already saturated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12397) return CNTR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12400) if (entry->flags & CNTR_32BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12401) /* 32bit counters can wrap multiple times */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12402) u64 upper = sval >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12403) u64 lower = (sval << 32) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12404)
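			/*
			 * Worked example (illustrative): sval = 0x1fffffff0
			 * splits into upper = 0x1 and lower = 0xfffffff0.  A
			 * new hw read of val = 0x10 is below lower, so the
			 * 32-bit register wrapped: upper becomes 0x2 and the
			 * synthesized 64-bit value is 0x200000010.
			 */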
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12405) if (lower > val) { /* hw wrapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12406) if (upper == CNTR_32BIT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12407) val = CNTR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12408) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12409) upper++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12412) if (val != CNTR_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12413) val = (upper << 32) | val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12415) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12416) /* If we rolled we are saturated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12417) if ((val < sval) || (val > CNTR_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12418) val = CNTR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12422) *psval = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12424) hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12426) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12429) static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12430) struct cntr_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12431) u64 *psval, void *context, int vl, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12433) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12435) if (entry->flags & CNTR_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12436) dd_dev_err(dd, "Counter %s not enabled", entry->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12437) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12440) hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12442) if (entry->flags & CNTR_SYNTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12443) *psval = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12444) if (entry->flags & CNTR_32BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12445) val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12446) (data << 32) >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12447) val = data; /* return the full 64bit value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12448) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12449) val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12450) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12452) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12453) val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12456) *psval = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12458) hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12460) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12463) u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12465) struct cntr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12466) u64 *sval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12468) entry = &dev_cntrs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12469) sval = dd->scntrs + entry->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12471) if (vl != CNTR_INVALID_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12472) sval += vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12474) return read_dev_port_cntr(dd, entry, sval, dd, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12477) u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12479) struct cntr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12480) u64 *sval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12482) entry = &dev_cntrs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12483) sval = dd->scntrs + entry->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12485) if (vl != CNTR_INVALID_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12486) sval += vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12488) return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12491) u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12493) struct cntr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12494) u64 *sval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12496) entry = &port_cntrs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12497) sval = ppd->scntrs + entry->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12499) if (vl != CNTR_INVALID_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12500) sval += vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12502) if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12503) (index <= C_RCV_HDR_OVF_LAST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12504) /* We do not want to bother for disabled contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12508) return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12511) u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12513) struct cntr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12514) u64 *sval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12516) entry = &port_cntrs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12517) sval = ppd->scntrs + entry->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12519) if (vl != CNTR_INVALID_VL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12520) sval += vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12522) if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12523) (index <= C_RCV_HDR_OVF_LAST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12524) /* We do not want to bother for disabled contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12525) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12528) return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12531) static void do_update_synth_timer(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12533) u64 cur_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12534) u64 cur_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12535) u64 total_flits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12536) u8 update = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12537) int i, j, vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12538) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12539) struct cntr_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12540) struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12541) update_cntr_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12543) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12544) * Rather than keep beating on the CSRs, pick a minimal set that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12545) * check to watch for potential rollover. We do this by looking at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12546) * number of flits sent/received. If the total flits exceed the 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12547) * limit, we have to iterate over all the counters and update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12548) */
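	/*
	 * Illustrative numbers: with last_tx = last_rx = 0x10000000 and
	 * cur_tx = cur_rx = 0x90000000, total_flits = 0x100000000, which
	 * is >= CNTR_32BIT_MAX and forces the full update pass below.
	 */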
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12549) entry = &dev_cntrs[C_DC_RCV_FLITS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12550) cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12552) entry = &dev_cntrs[C_DC_XMIT_FLITS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12553) cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12555) hfi1_cdbg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12556) CNTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12557) "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12558) dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12560) if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12562) * May not be strictly necessary to update but it won't hurt and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12563) * simplifies the logic here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12565) update = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12566) hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12567) dd->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12568) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12569) total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12570) hfi1_cdbg(CNTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12571) "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12572) total_flits, (u64)CNTR_32BIT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12573) if (total_flits >= CNTR_32BIT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12574) hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12575) dd->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12576) update = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12580) if (update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12581) hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12582) for (i = 0; i < DEV_CNTR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12583) entry = &dev_cntrs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12584) if (entry->flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12585) for (vl = 0; vl < C_VL_COUNT; vl++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12586) read_dev_cntr(dd, i, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12587) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12588) read_dev_cntr(dd, i, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12591) ppd = (struct hfi1_pportdata *)(dd + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12592) for (i = 0; i < dd->num_pports; i++, ppd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12593) for (j = 0; j < PORT_CNTR_LAST; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12594) entry = &port_cntrs[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12595) if (entry->flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12596) for (vl = 0; vl < C_VL_COUNT; vl++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12597) read_port_cntr(ppd, j, vl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12598) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12599) read_port_cntr(ppd, j, CNTR_INVALID_VL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12605) * We want the value in the register. The goal is to keep track
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12606) * of the number of "ticks", not the counter value. In other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12607) * words, if the register rolls we want to notice it and go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12608) * ahead and force an update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12610) entry = &dev_cntrs[C_DC_XMIT_FLITS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12611) dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12612) CNTR_MODE_R, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12614) entry = &dev_cntrs[C_DC_RCV_FLITS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12615) dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12616) CNTR_MODE_R, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12618) hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12619) dd->unit, dd->last_tx, dd->last_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12621) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12622) hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12626) static void update_synth_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12628) struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12630) queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12631) mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12632) }
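
/*
 * Note on the pattern above, illustrative only: the timer callback runs
 * in atomic context, so the CSR-heavy counter refresh is deferred to a
 * workqueue while the timer re-arms itself every SYNTH_CNT_TIME seconds.
 * A minimal setup pairing, with a hypothetical workqueue name:
 */
#if 0	/* example, not compiled */
static int example_start_synth_updates(struct hfi1_devdata *dd)
{
	timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_example_wq", 0);
	if (!dd->update_cntr_wq)
		return -ENOMEM;
	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
	return 0;
}
#endif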
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12634) #define C_MAX_NAME 16 /* 15 chars + one for \0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12635) static int init_cntrs(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12637) int i, rcv_ctxts, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12638) size_t sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12639) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12640) char name[C_MAX_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12641) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12642) const char *bit_type_32 = ",32";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12643) const int bit_type_32_sz = strlen(bit_type_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12644) u32 sdma_engines = chip_sdma_engines(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12646) /* set up the stats timer; the add_timer is done at the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12647) timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12649) /***********************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12650) /* per device counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12651) /***********************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12653) /* size names and determine how many we have */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12654) dd->ndevcntrs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12655) sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12657) for (i = 0; i < DEV_CNTR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12658) if (dev_cntrs[i].flags & CNTR_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12659) hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12660) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12663) if (dev_cntrs[i].flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12664) dev_cntrs[i].offset = dd->ndevcntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12665) for (j = 0; j < C_VL_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12666) snprintf(name, C_MAX_NAME, "%s%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12667) dev_cntrs[i].name, vl_from_idx(j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12668) sz += strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12669) /* Add ",32" for 32-bit counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12670) if (dev_cntrs[i].flags & CNTR_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12671) sz += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12672) sz++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12673) dd->ndevcntrs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12675) } else if (dev_cntrs[i].flags & CNTR_SDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12676) dev_cntrs[i].offset = dd->ndevcntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12677) for (j = 0; j < sdma_engines; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12678) snprintf(name, C_MAX_NAME, "%s%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12679) dev_cntrs[i].name, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12680) sz += strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12681) /* Add ",32" for 32-bit counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12682) if (dev_cntrs[i].flags & CNTR_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12683) sz += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12684) sz++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12685) dd->ndevcntrs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12687) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12688) /* +1 for newline. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12689) sz += strlen(dev_cntrs[i].name) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12690) /* Add ",32" for 32-bit counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12691) if (dev_cntrs[i].flags & CNTR_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12692) sz += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12693) dev_cntrs[i].offset = dd->ndevcntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12694) dd->ndevcntrs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12698) /* allocate space for the counter values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12699) dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12700) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12701) if (!dd->cntrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12702) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12704) dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12705) if (!dd->scntrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12706) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12708) /* allocate space for the counter names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12709) dd->cntrnameslen = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12710) dd->cntrnames = kmalloc(sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12711) if (!dd->cntrnames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12712) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12714) /* fill in the names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12715) for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12716) if (dev_cntrs[i].flags & CNTR_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12717) /* Nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12718) } else if (dev_cntrs[i].flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12719) for (j = 0; j < C_VL_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12720) snprintf(name, C_MAX_NAME, "%s%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12721) dev_cntrs[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12722) vl_from_idx(j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12723) memcpy(p, name, strlen(name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12724) p += strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12726) /* Counter is 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12727) if (dev_cntrs[i].flags & CNTR_32BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12728) memcpy(p, bit_type_32, bit_type_32_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12729) p += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12732) *p++ = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12734) } else if (dev_cntrs[i].flags & CNTR_SDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12735) for (j = 0; j < sdma_engines; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12736) snprintf(name, C_MAX_NAME, "%s%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12737) dev_cntrs[i].name, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12738) memcpy(p, name, strlen(name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12739) p += strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12741) /* Counter is 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12742) if (dev_cntrs[i].flags & CNTR_32BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12743) memcpy(p, bit_type_32, bit_type_32_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12744) p += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12747) *p++ = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12749) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12750) memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12751) p += strlen(dev_cntrs[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12753) /* Counter is 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12754) if (dev_cntrs[i].flags & CNTR_32BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12755) memcpy(p, bit_type_32, bit_type_32_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12756) p += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12759) *p++ = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12761) }
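	/*
	 * Illustrative example of the resulting name block, with
	 * hypothetical counter names; ",32" marks counters that are only
	 * 32 bits wide in hardware:
	 *
	 *	ExamplePerVlCntr0
	 *	ExamplePerVlCntr1
	 *	ExampleSdmaCntr0,32
	 *	ExampleDevCntr
	 */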
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12763) /*********************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12764) /* per port counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12765) /*********************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12768) * Go through the counters for the overflows and disable the ones we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12769) * don't need. This varies based on platform so we need to do it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12770) * dynamically here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12772) rcv_ctxts = dd->num_rcv_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12773) for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12774) i <= C_RCV_HDR_OVF_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12775) port_cntrs[i].flags |= CNTR_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12778) /* size port counter names and determine how many we have */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12779) sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12780) dd->nportcntrs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12781) for (i = 0; i < PORT_CNTR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12782) if (port_cntrs[i].flags & CNTR_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12783) hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12784) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12787) if (port_cntrs[i].flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12788) port_cntrs[i].offset = dd->nportcntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12789) for (j = 0; j < C_VL_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12790) snprintf(name, C_MAX_NAME, "%s%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12791) port_cntrs[i].name, vl_from_idx(j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12792) sz += strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12793) /* Add ",32" for 32-bit counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12794) if (port_cntrs[i].flags & CNTR_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12795) sz += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12796) sz++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12797) dd->nportcntrs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12799) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12800) /* +1 for newline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12801) sz += strlen(port_cntrs[i].name) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12802) /* Add ",32" for 32-bit counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12803) if (port_cntrs[i].flags & CNTR_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12804) sz += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12805) port_cntrs[i].offset = dd->nportcntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12806) dd->nportcntrs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12810) /* allocate space for the counter names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12811) dd->portcntrnameslen = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12812) dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12813) if (!dd->portcntrnames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12814) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12816) /* fill in port cntr names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12817) for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12818) if (port_cntrs[i].flags & CNTR_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12819) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12821) if (port_cntrs[i].flags & CNTR_VL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12822) for (j = 0; j < C_VL_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12823) snprintf(name, C_MAX_NAME, "%s%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12824) port_cntrs[i].name, vl_from_idx(j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12825) memcpy(p, name, strlen(name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12826) p += strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12828) /* Counter is 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12829) if (port_cntrs[i].flags & CNTR_32BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12830) memcpy(p, bit_type_32, bit_type_32_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12831) p += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12834) *p++ = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12836) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12837) memcpy(p, port_cntrs[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12838) strlen(port_cntrs[i].name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12839) p += strlen(port_cntrs[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12841) /* Counter is 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12842) if (port_cntrs[i].flags & CNTR_32BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12843) memcpy(p, bit_type_32, bit_type_32_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12844) p += bit_type_32_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12847) *p++ = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12851) /* allocate per port storage for counter values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12852) ppd = (struct hfi1_pportdata *)(dd + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12853) for (i = 0; i < dd->num_pports; i++, ppd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12854) ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12855) if (!ppd->cntrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12856) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12858) ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12859) if (!ppd->scntrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12860) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12863) /* CPU counters need to be allocated and zeroed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12864) if (init_cpu_counters(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12865) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12867) dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12868) WQ_MEM_RECLAIM, dd->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12869) if (!dd->update_cntr_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12870) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12872) INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12873)
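	/* start the synthetic-counter update timer */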
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12874) mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12875) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12876) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12877) free_cntrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12878) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12881) static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12883) switch (chip_lstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12884) case LSTATE_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12885) return IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12886) case LSTATE_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12887) return IB_PORT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12888) case LSTATE_ARMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12889) return IB_PORT_ARMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12890) case LSTATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12891) return IB_PORT_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12892) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12893) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12894) "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12895) chip_lstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12896) return IB_PORT_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12900) u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12902) /* look at the HFI meta-states only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12903) switch (chip_pstate & 0xf0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12904) case PLS_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12905) return IB_PORTPHYSSTATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12906) case PLS_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12907) return OPA_PORTPHYSSTATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12908) case PLS_POLLING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12909) return IB_PORTPHYSSTATE_POLLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12910) case PLS_CONFIGPHY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12911) return IB_PORTPHYSSTATE_TRAINING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12912) case PLS_LINKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12913) return IB_PORTPHYSSTATE_LINKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12914) case PLS_PHYTEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12915) return IB_PORTPHYSSTATE_PHY_TEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12916) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12917) dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12918) chip_pstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12919) return IB_PORTPHYSSTATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12923) /* return the OPA port logical state name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12924) const char *opa_lstate_name(u32 lstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12926) static const char * const port_logical_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12927) "PORT_NOP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12928) "PORT_DOWN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12929) "PORT_INIT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12930) "PORT_ARMED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12931) "PORT_ACTIVE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12932) "PORT_ACTIVE_DEFER",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12933) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12934) if (lstate < ARRAY_SIZE(port_logical_names))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12935) return port_logical_names[lstate];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12936) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12939) /* return the OPA port physical state name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12940) const char *opa_pstate_name(u32 pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12942) static const char * const port_physical_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12943) "PHYS_NOP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12944) "reserved1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12945) "PHYS_POLL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12946) "PHYS_DISABLED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12947) "PHYS_TRAINING",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12948) "PHYS_LINKUP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12949) "PHYS_LINK_ERR_RECOVER",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12950) "PHYS_PHY_TEST",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12951) "reserved8",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12952) "PHYS_OFFLINE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12953) "PHYS_GANGED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12954) "PHYS_TEST",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12956) if (pstate < ARRAY_SIZE(port_physical_names))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12957) return port_physical_names[pstate];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12958) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12961) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12962) * update_statusp - Update userspace status flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12963) * @ppd: Port data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12964) * @state: port state information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12966) * Actual port status is determined by the host_link_state value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12967) * in the ppd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12969) * host_link_state MUST be updated before updating the user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12970) * statusp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12972) static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12975) * Set port status flags in the page mapped into userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12976) * memory. Do it here to ensure a reliable state - this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12977) * the only function called by all state handling code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12978) * Always set the flags due to the fact that the cache value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12979) * might have been changed explicitly outside of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12980) * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12982) if (ppd->statusp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12983) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12984) case IB_PORT_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12985) case IB_PORT_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12986) *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12987) HFI1_STATUS_IB_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12988) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12989) case IB_PORT_ARMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12990) *ppd->statusp |= HFI1_STATUS_IB_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12991) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12992) case IB_PORT_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12993) *ppd->statusp |= HFI1_STATUS_IB_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12994) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12997) dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12998) opa_lstate_name(state), state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13001) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13002) * wait_logical_linkstate - wait for an IB link state change to occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13003) * @ppd: port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13004) * @state: the state to wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13005) * @msecs: the number of milliseconds to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13006) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13007) * Wait up to msecs milliseconds for IB link state change to occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13008) * For now, take the easy polling route.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13009) * Returns 0 if state reached, otherwise -ETIMEDOUT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13011) static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13012) int msecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13014) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13015) u32 new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13017) timeout = jiffies + msecs_to_jiffies(msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13018) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13019) new_state = chip_to_opa_lstate(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13020) read_logical_state(ppd->dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13021) if (new_state == state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13023) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13024) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13025) "timeout waiting for link state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13026) state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13027) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13029) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13032) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13035) static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13037) u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13039) dd_dev_info(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13040) "physical state changed to %s (0x%x), phy 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13041) opa_pstate_name(ib_pstate), ib_pstate, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13045) * Read the physical hardware link state and check if it matches host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13046) * drivers anticipated state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13048) static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13050) u32 read_state = read_physical_state(ppd->dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13052) if (read_state == state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13053) log_state_transition(ppd, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13054) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13055) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13056) "anticipated phy link state 0x%x, read 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13057) state, read_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13062) * wait_physical_linkstate - wait for an physical link state change to occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13063) * @ppd: port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13064) * @state: the state to wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13065) * @msecs: the number of milliseconds to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13066) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13067) * Wait up to msecs milliseconds for physical link state change to occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13068) * Returns 0 if state reached, otherwise -ETIMEDOUT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13070) static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13071) int msecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13073) u32 read_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13074) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13076) timeout = jiffies + msecs_to_jiffies(msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13077) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13078) read_state = read_physical_state(ppd->dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13079) if (read_state == state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13081) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13082) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13083) "timeout waiting for phy link state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13084) state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13085) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13087) usleep_range(1950, 2050); /* sleep 2ms-ish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13090) log_state_transition(ppd, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13091) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13095) * wait_phys_link_offline_quiet_substates - wait for any offline substate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13096) * @ppd: port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13097) * @msecs: the number of milliseconds to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13098) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13099) * Wait up to msecs milliseconds for any offline physical link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13100) * state change to occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13101) * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13103) static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13104) int msecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13106) u32 read_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13107) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13109) timeout = jiffies + msecs_to_jiffies(msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13110) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13111) read_state = read_physical_state(ppd->dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13112) if ((read_state & 0xF0) == PLS_OFFLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13114) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13115) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13116) "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13117) read_state, msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13118) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13120) usleep_range(1950, 2050); /* sleep 2ms-ish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13123) log_state_transition(ppd, read_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13124) return read_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13127) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13128) * wait_phys_link_out_of_offline - wait for any out of offline state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13129) * @ppd: port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13130) * @msecs: the number of milliseconds to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13132) * Wait up to msecs milliseconds for any out of offline physical link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13133) * state change to occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13134) * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13136) static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13137) int msecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13139) u32 read_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13140) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13142) timeout = jiffies + msecs_to_jiffies(msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13143) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13144) read_state = read_physical_state(ppd->dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13145) if ((read_state & 0xF0) != PLS_OFFLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13146) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13147) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13148) dd_dev_err(ppd->dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13149) "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13150) read_state, msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13151) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13153) usleep_range(1950, 2050); /* sleep 2ms-ish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13156) log_state_transition(ppd, read_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13157) return read_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13160) #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13161) (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13163) #define SET_STATIC_RATE_CONTROL_SMASK(r) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13164) (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13166) void hfi1_init_ctxt(struct send_context *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13168) if (sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13169) struct hfi1_devdata *dd = sc->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13170) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13171) u8 set = (sc->type == SC_USER ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13172) HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13173) HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13174) reg = read_kctxt_csr(dd, sc->hw_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13175) SEND_CTXT_CHECK_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13176) if (set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13177) CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13178) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13179) SET_STATIC_RATE_CONTROL_SMASK(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13180) write_kctxt_csr(dd, sc->hw_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13181) SEND_CTXT_CHECK_ENABLE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13185) int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13187) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13188) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13190) if (dd->icode != ICODE_RTL_SILICON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13191) if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13192) dd_dev_info(dd, "%s: tempsense not supported by HW\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13193) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13194) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13196) reg = read_csr(dd, ASIC_STS_THERM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13197) temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13198) ASIC_STS_THERM_CURR_TEMP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13199) temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13200) ASIC_STS_THERM_LO_TEMP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13201) temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13202) ASIC_STS_THERM_HI_TEMP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13203) temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13204) ASIC_STS_THERM_CRIT_TEMP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13205) /* triggers is a 3-bit value - 1 bit per trigger. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13206) temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13208) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13211) /* ========================================================================= */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13213) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13214) * read_mod_write() - Calculate the IRQ register index and set/clear the bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13215) * @dd: valid devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13216) * @src: IRQ source to determine register index from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13217) * @bits: the bits to set or clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13218) * @set: true == set the bits, false == clear the bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13219) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13221) static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13222) bool set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13224) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13225) u16 idx = src / BITS_PER_REGISTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13226)
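	/*
	 * The mask CSRs are 64 bits wide and laid out at an 8-byte
	 * stride, hence the (8 * idx) byte offset below.
	 */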
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13227) spin_lock(&dd->irq_src_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13228) reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13229) if (set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13230) reg |= bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13231) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13232) reg &= ~bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13233) write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13234) spin_unlock(&dd->irq_src_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13237) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13238) * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13239) * @dd: valid devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13240) * @first: first IRQ source to set/clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13241) * @last: last IRQ source (inclusive) to set/clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13242) * @set: true == set the bits, false == clear the bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13243) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13244) * If first == last, set the exact source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13246) int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13248) u64 bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13249) u64 bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13250) u16 src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13252) if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13253) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13255) if (last < first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13256) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13258) for (src = first; src <= last; src++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13259) bit = src % BITS_PER_REGISTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13260) /* wrapped to next register? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13261) if (!bit && bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13262) read_mod_write(dd, src - 1, bits, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13263) bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13265) bits |= BIT_ULL(bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13267) read_mod_write(dd, last, bits, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13273) * Clear all interrupt sources on the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13275) void clear_all_interrupts(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13277) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13278)
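	/* the *_CLEAR CSRs are write-1-to-clear; write all ones to each */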
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13279) for (i = 0; i < CCE_NUM_INT_CSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13280) write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13282) write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13283) write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13284) write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13285) write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13286) write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13287) write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13288) write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13289) for (i = 0; i < chip_send_contexts(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13290) write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13291) for (i = 0; i < chip_sdma_engines(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13292) write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13294) write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13295) write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13296) write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13300) * Remap the interrupt source from the general handler to the given MSI-X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13301) * interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13303) void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13305) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13306) int m, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13308) /* clear from the handled mask of the general interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13309) m = isrc / 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13310) n = isrc % 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13311) if (likely(m < CCE_NUM_INT_CSRS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13312) dd->gi_mask[m] &= ~((u64)1 << n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13313) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13314) dd_dev_err(dd, "remap interrupt err\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13315) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13318) /* direct the chip source to the given MSI-X interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13319) m = isrc / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13320) n = isrc % 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13321) reg = read_csr(dd, CCE_INT_MAP + (8 * m));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13322) reg &= ~((u64)0xff << (8 * n));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13323) reg |= ((u64)msix_intr & 0xff) << (8 * n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13324) write_csr(dd, CCE_INT_MAP + (8 * m), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13327) void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13330) * SDMA engine interrupt sources grouped by type, rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13331) * engine. Per-engine interrupts are as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13332) * SDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13333) * SDMAProgress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13334) * SDMAIdle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13336) remap_intr(dd, IS_SDMA_START + engine, msix_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13337) remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13338) remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13342) * Set the general handler to accept all interrupts, remap all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13343) * chip interrupts back to MSI-X 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13345) void reset_interrupts(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13347) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13349) /* all interrupts handled by the general handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13350) for (i = 0; i < CCE_NUM_INT_CSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13351) dd->gi_mask[i] = ~(u64)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13353) /* all chip interrupts map to MSI-X 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13354) for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13355) write_csr(dd, CCE_INT_MAP + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13358) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13359) * set_up_interrupts() - Initialize the IRQ resources and state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13360) * @dd: valid devdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13361) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13363) static int set_up_interrupts(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13365) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13367) /* mask all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13368) set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13370) /* clear all pending interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13371) clear_all_interrupts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13373) /* reset general handler mask, chip MSI-X mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13374) reset_interrupts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13376) /* ask for MSI-X interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13377) ret = msix_initialize(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13378) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13379) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13381) ret = msix_request_irqs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13382) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13383) msix_clean_up_interrupts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13385) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13389) * Set up context values in dd. Sets:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13390) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13391) * num_rcv_contexts - number of contexts being used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13392) * n_krcv_queues - number of kernel contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13393) * first_dyn_alloc_ctxt - first dynamically allocated context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13394) * in array of contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13395) * freectxts - number of free user contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13396) * num_send_contexts - number of PIO send contexts being used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13397) * num_netdev_contexts - number of contexts reserved for netdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13399) static int set_up_context_variables(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13401) unsigned long num_kernel_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13402) u16 num_netdev_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13403) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13404) unsigned ngroups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13405) int rmt_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13406) int user_rmt_reduced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13407) u32 n_usr_ctxts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13408) u32 send_contexts = chip_send_contexts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13409) u32 rcv_contexts = chip_rcv_contexts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13412) * Kernel receive contexts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13413) * - Context 0 - control context (VL15/multicast/error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13414) * - Context 1 - first kernel context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13415) * - Context 2 - second kernel context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13416) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13418) if (n_krcvqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13420) * n_krcvqs is the sum of module parameter kernel receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13421) * contexts, krcvqs[]. It does not include the control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13422) * context, so add that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13424) num_kernel_contexts = n_krcvqs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13425) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13426) num_kernel_contexts = DEFAULT_KRCVQS + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13428) * Every kernel receive context needs an ACK send context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13429) * one send context is allocated for each VL{0-7} and VL15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13431) if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13432) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13433) "Reducing # kernel rcv contexts to: %d, from %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13434) send_contexts - num_vls - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13435) num_kernel_contexts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13436) num_kernel_contexts = send_contexts - num_vls - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13440) * User contexts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13441) * - default to 1 user context per real (non-HT) CPU core if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13442) * num_user_contexts is negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13444) if (num_user_contexts < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13445) n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13446) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13447) n_usr_ctxts = num_user_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13449) * Adjust the counts given a global max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13451) if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13452) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13453) "Reducing # user receive contexts to: %u, from %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13454) (u32)(rcv_contexts - num_kernel_contexts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13455) n_usr_ctxts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13456) /* recalculate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13457) n_usr_ctxts = rcv_contexts - num_kernel_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13460) num_netdev_contexts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13461) hfi1_num_netdev_contexts(dd, rcv_contexts -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13462) (num_kernel_contexts + n_usr_ctxts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13463) &node_affinity.real_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13465) * The RMT entries are currently allocated as shown below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13466) * 1. QOS (0 to 128 entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13467) * 2. FECN (num_kernel_context - 1 + num_user_contexts +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13468) * num_netdev_contexts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13469) * 3. netdev (num_netdev_contexts).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13470) * It should be noted that FECN oversubscribe num_netdev_contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13471) * entries of RMT because both netdev and PSM could allocate any receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13472) * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13473) * and PSM FECN must reserve an RMT entry for each possible PSM receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13474) * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13476) rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13477) if (HFI1_CAP_IS_KSET(TID_RDMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13478) rmt_count += num_kernel_contexts - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13479) if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13480) user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13481) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13482) "RMT size is reducing the number of user receive contexts from %u to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13483) n_usr_ctxts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13484) user_rmt_reduced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13485) /* recalculate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13486) n_usr_ctxts = user_rmt_reduced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13489) /* the first N are kernel contexts, the rest are user/netdev contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13490) dd->num_rcv_contexts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13491) num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13492) dd->n_krcv_queues = num_kernel_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13493) dd->first_dyn_alloc_ctxt = num_kernel_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13494) dd->num_netdev_contexts = num_netdev_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13495) dd->num_user_contexts = n_usr_ctxts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13496) dd->freectxts = n_usr_ctxts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13497) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13498) "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13499) rcv_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13500) (int)dd->num_rcv_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13501) (int)dd->n_krcv_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13502) dd->num_netdev_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13503) dd->num_user_contexts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13505) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13506) * Receive array allocation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13507) * All RcvArray entries are divided into groups of 8. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13508) * is required by the hardware and will speed up writes to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13509) * consecutive entries by using write-combining of the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13510) * cacheline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13511) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13512) * The number of groups are evenly divided among all contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13513) * any left over groups will be given to the first N user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13514) * contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13516) dd->rcv_entries.group_size = RCV_INCREMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13517) ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13518) dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13519) dd->rcv_entries.nctxt_extra = ngroups -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13520) (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13521) dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13522) dd->rcv_entries.ngroups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13523) dd->rcv_entries.nctxt_extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13524) if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13525) MAX_EAGER_ENTRIES * 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13526) dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13527) dd->rcv_entries.group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13528) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13529) "RcvArray group count too high, change to %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13530) dd->rcv_entries.ngroups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13531) dd->rcv_entries.nctxt_extra = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13534) * PIO send contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13535) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13536) ret = init_sc_pools_and_sizes(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13537) if (ret >= 0) { /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13538) dd->num_send_contexts = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13539) dd_dev_info(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13540) dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13541) "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13542) send_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13543) dd->num_send_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13544) dd->sc_sizes[SC_KERNEL].count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13545) dd->sc_sizes[SC_ACK].count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13546) dd->sc_sizes[SC_USER].count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13547) dd->sc_sizes[SC_VL15].count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13548) ret = 0; /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13551) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13555) * Set the device/port partition key table. The MAD code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13556) * will ensure that, at least, the partial management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13557) * partition key is present in the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13559) static void set_partition_keys(struct hfi1_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13561) struct hfi1_devdata *dd = ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13562) u64 reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13563) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13565) dd_dev_info(dd, "Setting partition keys\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13566) for (i = 0; i < hfi1_get_npkeys(dd); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13567) reg |= (ppd->pkeys[i] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13568) RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13569) ((i % 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13570) RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13571) /* Each register holds 4 PKey values. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13572) if ((i % 4) == 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13573) write_csr(dd, RCV_PARTITION_KEY +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13574) ((i - 3) * 2), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13575) reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13579) /* Always enable the HW pkey check when the pkey table is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13580) add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13581) }
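/*
 * Worked example of the packing above (illustrative only; bit positions
 * assume RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT == 16): pkeys are
 * 16 bits wide and four share each 64-bit CSR, so pkey index i lands
 * at bit 16 * (i % 4) of CSR number i / 4.  The "(i - 3) * 2" byte
 * offset written at the i % 4 == 3 boundary equals 8 * (i / 4), e.g.
 * i == 7 gives (7 - 3) * 2 == 8 == 8 * (7 / 4).
 */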
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13583) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13584) * These CSRs and memories are uninitialized on reset and must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13585) * written before reading to set the ECC/parity bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13586) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13587) * NOTE: All user context CSRs that are not mmapped write-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13588) * (e.g. the TID flows) must be initialized even if the driver never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13589) * reads them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13590) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13591) static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13593) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13595) /* CceIntMap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13596) for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13597) write_csr(dd, CCE_INT_MAP + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13599) /* SendCtxtCreditReturnAddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13600) for (i = 0; i < chip_send_contexts(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13601) write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13603) /* PIO Send buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13604) /* SDMA Send buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13605) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13606) * These are not normally read, and (presently) have no method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13607) * to be read, so are not pre-initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13608) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13610) /* RcvHdrAddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13611) /* RcvHdrTailAddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13612) /* RcvTidFlowTable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13613) for (i = 0; i < chip_rcv_contexts(dd); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13614) write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13615) write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13616) for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13617) write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13620) /* RcvArray */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13621) for (i = 0; i < chip_rcv_array_count(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13622) hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13624) /* RcvQPMapTable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13625) for (i = 0; i < 32; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13626) write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13630) * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13632) static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13633) u64 ctrl_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13635) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13636) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13638) /* is the condition present? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13639) reg = read_csr(dd, CCE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13640) if ((reg & status_bits) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13641) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13643) /* clear the condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13644) write_csr(dd, CCE_CTRL, ctrl_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13646) /* wait for the condition to clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13647) timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13648) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13649) reg = read_csr(dd, CCE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13650) if ((reg & status_bits) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13651) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13652) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13653) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13654) "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13655) status_bits, reg & status_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13656) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13658) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13660) }
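/*
 * The loop above is an instance of a generic poll-until-clear pattern.
 * A minimal sketch of that pattern as a standalone helper is shown below
 * for reference only; the driver does not define this function and the
 * name is hypothetical:
 */
#if 0
static int poll_csr_bits_clear(struct hfi1_devdata *dd, u32 csr, u64 bits,
			       unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* spin until the requested status bits read back as zero */
	while (read_csr(dd, csr) & bits) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT; /* caller decides how to report */
		udelay(1);
	}
	return 0;
}
#endif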
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13662) /* set CCE CSRs to chip reset defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13663) static void reset_cce_csrs(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13665) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13667) /* CCE_REVISION read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13668) /* CCE_REVISION2 read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13669) /* CCE_CTRL - bits clear automatically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13670) /* CCE_STATUS read-only, use CceCtrl to clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13671) clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13672) clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13673) clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13674) for (i = 0; i < CCE_NUM_SCRATCH; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13675) write_csr(dd, CCE_SCRATCH + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13676) /* CCE_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13677) write_csr(dd, CCE_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13678) write_csr(dd, CCE_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13679) /* CCE_ERR_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13680) for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13681) write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13682) write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13683) /* CCE_PCIE_CTRL leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13684) for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13685) write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13686) write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13687) CCE_MSIX_TABLE_UPPER_RESETCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13689) for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13690) /* CCE_MSIX_PBA read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13691) write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13692) write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13694) for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13695) write_csr(dd, CCE_INT_MAP + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13696) for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13697) /* CCE_INT_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13698) write_csr(dd, CCE_INT_MASK + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13699) write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13700) /* CCE_INT_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13701) /* CCE_INT_BLOCKED read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13703) for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13704) write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13707) /* set MISC CSRs to chip reset defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13708) static void reset_misc_csrs(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13710) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13712) for (i = 0; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13713) write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13714) write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13715) write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13717) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13718) * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13719) * only be written in 128-byte chunks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13721) /* init RSA engine to clear lingering errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13722) write_csr(dd, MISC_CFG_RSA_CMD, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13723) write_csr(dd, MISC_CFG_RSA_MU, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13724) write_csr(dd, MISC_CFG_FW_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13725) /* MISC_STS_8051_DIGEST read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13726) /* MISC_STS_SBM_DIGEST read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13727) /* MISC_STS_PCIE_DIGEST read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13728) /* MISC_STS_FAB_DIGEST read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13729) /* MISC_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13730) write_csr(dd, MISC_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13731) write_csr(dd, MISC_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13732) /* MISC_ERR_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13735) /* set TXE CSRs to chip reset defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13736) static void reset_txe_csrs(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13738) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13740) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13741) * TXE Kernel CSRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13743) write_csr(dd, SEND_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13744) __cm_reset(dd, 0); /* reset CM internal state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13745) /* SEND_CONTEXTS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13746) /* SEND_DMA_ENGINES read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13747) /* SEND_PIO_MEM_SIZE read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13748) /* SEND_DMA_MEM_SIZE read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13749) write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13750) pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13751) /* SEND_PIO_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13752) write_csr(dd, SEND_PIO_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13753) write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13754) /* SEND_PIO_ERR_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13755) /* SEND_DMA_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13756) write_csr(dd, SEND_DMA_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13757) write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13758) /* SEND_DMA_ERR_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13759) /* SEND_EGRESS_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13760) write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13761) write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13762) /* SEND_EGRESS_ERR_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13763) write_csr(dd, SEND_BTH_QP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13764) write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13765) write_csr(dd, SEND_SC2VLT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13766) write_csr(dd, SEND_SC2VLT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13767) write_csr(dd, SEND_SC2VLT2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13768) write_csr(dd, SEND_SC2VLT3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13769) write_csr(dd, SEND_LEN_CHECK0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13770) write_csr(dd, SEND_LEN_CHECK1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13771) /* SEND_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13772) write_csr(dd, SEND_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13773) write_csr(dd, SEND_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13774) /* SEND_ERR_FORCE read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13775) for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13776) write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13777) for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13778) write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13779) for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13780) write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13781) for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13782) write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13783) for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13784) write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13785) write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13786) write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13787) /* SEND_CM_CREDIT_USED_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13788) write_csr(dd, SEND_CM_TIMER_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13789) write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13790) write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13791) write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13792) write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13793) for (i = 0; i < TXE_NUM_DATA_VL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13794) write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13795) write_csr(dd, SEND_CM_CREDIT_VL15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13796) /* SEND_CM_CREDIT_USED_VL read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13797) /* SEND_CM_CREDIT_USED_VL15 read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13798) /* SEND_EGRESS_CTXT_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13799) /* SEND_EGRESS_SEND_DMA_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13800) /* SEND_EGRESS_ERR_INFO is cleared by writing 1s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13801) write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13802) /* SEND_EGRESS_ERR_SOURCE read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13805) * TXE Per-Context CSRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13807) for (i = 0; i < chip_send_contexts(dd); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13808) write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13809) write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13810) write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13811) write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13812) write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13813) write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13814) write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13815) write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13816) write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13817) write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13818) write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13819) write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13823) * TXE Per-SDMA CSRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13824) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13825) for (i = 0; i < chip_sdma_engines(dd); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13826) write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13827) /* SEND_DMA_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13828) write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13829) write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13830) write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13831) /* SEND_DMA_HEAD read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13832) write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13833) write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13834) /* SEND_DMA_IDLE_CNT read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13835) write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13836) write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13837) /* SEND_DMA_DESC_FETCHED_CNT read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13838) /* SEND_DMA_ENG_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13839) write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13840) write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13841) /* SEND_DMA_ENG_ERR_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13842) write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13843) write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13844) write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13845) write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13846) write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13847) write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13848) write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13853) * Expect on entry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13854) * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13856) static void init_rbufs(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13858) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13859) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13862) * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13863) * clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13865) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13866) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13867) reg = read_csr(dd, RCV_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13868) if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13869) | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13872) * Give up after 1ms - maximum wait time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13874) * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13875) * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13876) * 136 KB / (66% * 250MB/s) = 844us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13878) if (count++ > 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13879) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13880) "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13881) __func__, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13884) udelay(2); /* do not busy-wait the CSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13887) /* start the init - expect RcvCtrl to be 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13888) write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13891) * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13892) * period after the write before RcvStatus.RxRbufInitDone is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13893) * The delay in the first run through the loop below is sufficient and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13894) * required before the first read of RcvStatus.RxRbufInitDone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13896) read_csr(dd, RCV_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13898) /* wait for the init to finish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13899) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13900) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13901) /* delay is required first time through - see above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13902) udelay(2); /* do not busy-wait the CSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13903) reg = read_csr(dd, RCV_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13904) if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13907) /* give up after 100us - slowest possible at 33MHz is 73us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13908) if (count++ > 50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13909) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13910) "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13911) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13915) }
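/*
 * Worst case, the two polls above busy-wait roughly 500 * 2us = 1ms for
 * the DMA drain plus 50 * 2us = 100us for RxRbufInitDone, matching the
 * budgets called out in the comments inside init_rbufs().
 */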
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13917) /* set RXE CSRs to chip reset defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13918) static void reset_rxe_csrs(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13920) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13923) * RXE Kernel CSRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13925) write_csr(dd, RCV_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13926) init_rbufs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13927) /* RCV_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13928) /* RCV_CONTEXTS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13929) /* RCV_ARRAY_CNT read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13930) /* RCV_BUF_SIZE read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13931) write_csr(dd, RCV_BTH_QP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13932) write_csr(dd, RCV_MULTICAST, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13933) write_csr(dd, RCV_BYPASS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13934) write_csr(dd, RCV_VL15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13935) /* this is a clear-down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13936) write_csr(dd, RCV_ERR_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13937) RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13938) /* RCV_ERR_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13939) write_csr(dd, RCV_ERR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13940) write_csr(dd, RCV_ERR_CLEAR, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13941) /* RCV_ERR_FORCE leave alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13942) for (i = 0; i < 32; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13943) write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13944) for (i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13945) write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13946) for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13947) write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13948) for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13949) write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13950) for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13951) clear_rsm_rule(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13952) for (i = 0; i < 32; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13953) write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13956) * RXE Kernel and User Per-Context CSRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13958) for (i = 0; i < chip_rcv_contexts(dd); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13959) /* kernel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13960) write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13961) /* RCV_CTXT_STATUS read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13962) write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13963) write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13964) write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13965) write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13966) write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13967) write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13968) write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13969) write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13970) write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13971) write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13973) /* user */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13974) /* RCV_HDR_TAIL read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13975) write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13976) /* RCV_EGR_INDEX_TAIL read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13977) write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13978) /* RCV_EGR_OFFSET_TAIL read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13979) for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13980) write_uctxt_csr(dd, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13981) RCV_TID_FLOW_TABLE + (8 * j), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13986) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13987) * Set sc2vl tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13988) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13989) * They power on to zeros, so to avoid send context errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13990) * they need to be set:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13991) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13992) * SC 0-7 -> VL 0-7 (respectively)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13993) * SC 15 -> VL 15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13994) * otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13995) * -> VL 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13997) static void init_sc2vl_tables(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13999) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14000) /* init per architecture spec, constrained by hardware capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14001)
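	/*
	 * In the SC2VL_VAL() calls below, the first argument selects the
	 * SendSC2VLt table number and the remaining arguments are (sc, vl)
	 * pairs, e.g. the "15, 15" pair in table 1 is the SC15 -> VL15
	 * entry from the mapping described above.  (Layout inferred from
	 * the values used here; see the macro definition for the
	 * authoritative packing.)
	 */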
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14002) /* HFI maps sent packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14003) write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14004) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14005) 0, 0, 1, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14006) 2, 2, 3, 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14007) 4, 4, 5, 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14008) 6, 6, 7, 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14009) write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14010) 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14011) 8, 0, 9, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14012) 10, 0, 11, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14013) 12, 0, 13, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14014) 14, 0, 15, 15));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14015) write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14016) 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14017) 16, 0, 17, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14018) 18, 0, 19, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14019) 20, 0, 21, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14020) 22, 0, 23, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14021) write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14022) 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14023) 24, 0, 25, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14024) 26, 0, 27, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14025) 28, 0, 29, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14026) 30, 0, 31, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14028) /* DC maps received packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14029) write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14030) 15_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14031) 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14032) 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14033) write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14034) 31_16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14035) 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14036) 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14038) /* initialize the cached sc2vl values consistently with h/w */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14039) for (i = 0; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14040) if (i < 8 || i == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14041) *((u8 *)(dd->sc2vl) + i) = (u8)i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14042) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14043) *((u8 *)(dd->sc2vl) + i) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14048) * Read chip sizes and then reset parts to sane, disabled, values. We cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14049) * depend on the chip going through a power-on reset - a driver may be loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14050) * and unloaded many times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14052) * Do not write any CSR values to the chip in this routine - there may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14053) * a reset following the (possible) FLR in this routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14056) static int init_chip(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14058) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14059) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14062) * Put the HFI CSRs in a known state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14063) * Combine this with a DC reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14064) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14065) * Stop the device from doing anything while we do a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14066) * reset. We know there are no other active users of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14067) * the device since we are now in charge. Turn off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14068) * all outbound and inbound traffic and make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14069) * the device does not generate any interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14072) /* disable send contexts and SDMA engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14073) write_csr(dd, SEND_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14074) for (i = 0; i < chip_send_contexts(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14075) write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14076) for (i = 0; i < chip_sdma_engines(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14077) write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14078) /* disable port (turn off RXE inbound traffic) and contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14079) write_csr(dd, RCV_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14080) for (i = 0; i < chip_rcv_contexts(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14081) write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14082) /* mask all interrupt sources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14083) for (i = 0; i < CCE_NUM_INT_CSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14084) write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14087) * DC Reset: do a full DC reset before the register clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14088) * A recommended length of time to hold is one CSR read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14089) * so reread the CceDcCtrl. Then, hold the DC in reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14090) * across the clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14092) write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14093) (void)read_csr(dd, CCE_DC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14095) if (use_flr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14097) * A FLR will reset the SPC core and part of the PCIe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14098) * The parts that need to be restored have already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14099) * saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14101) dd_dev_info(dd, "Resetting CSRs with FLR\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14103) /* do the FLR, the DC reset will remain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14104) pcie_flr(dd->pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14106) /* restore command and BARs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14107) ret = restore_pci_variables(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14108) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14109) dd_dev_err(dd, "%s: Could not restore PCI variables\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14110) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14111) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14114) if (is_ax(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14115) dd_dev_info(dd, "Resetting CSRs with a second FLR\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14116) pcie_flr(dd->pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14117) ret = restore_pci_variables(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14118) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14119) dd_dev_err(dd, "%s: Could not restore PCI variables\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14120) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14121) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14124) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14125) dd_dev_info(dd, "Resetting CSRs with writes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14126) reset_cce_csrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14127) reset_txe_csrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14128) reset_rxe_csrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14129) reset_misc_csrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14131) /* clear the DC reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14132) write_csr(dd, CCE_DC_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14134) /* Set the LED off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14135) setextled(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14138) * Clear the QSFP reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14139) * An FLR enforces a 0 on all out pins. The driver does not touch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14140) * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14141) * anything plugged in constantly in reset, if it pays attention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14142) * to RESET_N.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14143) * Prime examples of this are optical cables. Set all pins high.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14144) * I2CCLK and I2CDAT will change per direction, and INT_N and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14145) * MODPRS_N are input only and their value is ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14147) write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14148) write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14149) init_chip_resources(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14150) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14153) static void init_early_variables(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14155) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14157) /* assign link credit variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14158) dd->vau = CM_VAU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14159) dd->link_credits = CM_GLOBAL_CREDITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14160) if (is_ax(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14161) dd->link_credits--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14162) dd->vcu = cu_to_vcu(hfi1_cu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14163) /* enough room for 8 MAD packets plus header - 17K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14164) dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14165) if (dd->vl15_init > dd->link_credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14166) dd->vl15_init = dd->link_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14168) write_uninitialized_csrs_and_memories(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14170) if (HFI1_CAP_IS_KSET(PKEY_CHECK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14171) for (i = 0; i < dd->num_pports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14172) struct hfi1_pportdata *ppd = &dd->pport[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14174) set_partition_keys(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14176) init_sc2vl_tables(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14179) static void init_kdeth_qp(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14181) write_csr(dd, SEND_BTH_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14182) (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14183) SEND_BTH_QP_KDETH_QP_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14185) write_csr(dd, RCV_BTH_QP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14186) (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14187) RCV_BTH_QP_KDETH_QP_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14190) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14191) * hfi1_get_qp_map() - read one entry from the RcvQPMapTable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14192) * @dd: device data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14193) * @idx: index to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14194) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14195) u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14197) u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14199) reg >>= (idx % 8) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14200) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14201) }
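/*
 * Example (illustrative): each 64-bit RcvQPMapTable CSR packs eight
 * 8-bit entries, so idx == 10 reads the CSR at RCV_QP_MAP_TABLE + 8
 * (entries 8..15) and returns bits 16..23, the 10 % 8 == 2nd byte.
 */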
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14204) * init_qpmap_table() - set the qpn mapping table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14205) * @dd: device data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14206) * @first_ctxt: first context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14207) * @last_ctxt: last context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14209) * This routine sets the qpn mapping table that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14210) * is indexed by qpn[8:1].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14212) * The routine will round-robin the 256 settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14213) * from first_ctxt to last_ctxt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14215) * The first/last looks ahead to having specialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14216) * receive contexts for mgmt and bypass. Normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14217) * verbs traffic is assumed to be on a range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14218) * of receive contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14220) static void init_qpmap_table(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14221) u32 first_ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14222) u32 last_ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14224) u64 reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14225) u64 regno = RCV_QP_MAP_TABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14226) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14227) u64 ctxt = first_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14229) for (i = 0; i < 256; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14230) reg |= ctxt << (8 * (i % 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14231) ctxt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14232) if (ctxt > last_ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14233) ctxt = first_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14234) if (i % 8 == 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14235) write_csr(dd, regno, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14236) reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14237) regno += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14241) add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14242) | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14243) }
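/*
 * Example (illustrative): init_qpmap_table(dd, 2, 4) fills the 256
 * entries with the repeating pattern 2, 3, 4, 2, 3, 4, ... so that
 * qpn[8:1] values are spread round-robin across contexts 2 through 4.
 */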
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14245) struct rsm_map_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14246) u64 map[NUM_MAP_REGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14247) unsigned int used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14248) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14250) struct rsm_rule_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14251) u8 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14252) u8 pkt_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14253) u32 field1_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14254) u32 field2_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14255) u32 index1_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14256) u32 index1_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14257) u32 index2_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14258) u32 index2_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14259) u32 mask1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14260) u32 value1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14261) u32 mask2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14262) u32 value2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14263) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14265) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14266) * Return an initialized RMT map table for users to fill in. OK if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14267) * returns NULL, indicating no table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14269) static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14271) struct rsm_map_table *rmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14272) u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* use 0 as the default on A0 hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14274) rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14275) if (rmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14276) memset(rmt->map, rxcontext, sizeof(rmt->map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14277) rmt->used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14280) return rmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14281) }
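/*
 * Typical pairing of the RMT helpers (sketch only; rule programming is
 * elided, the caller owns the allocation, and example_program_rmt is a
 * hypothetical name, not a driver function):
 */
#if 0
static void example_program_rmt(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt = alloc_rsm_map_table(dd);

	if (rmt) {
		/* fill rmt->map[] / rmt->used while adding RSM rules */
		complete_rsm_map_table(dd, rmt); /* write map, enable RSM */
	}
	kfree(rmt); /* kfree(NULL) is a no-op */
}
#endif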
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14284) * Write the final RMT map table to the chip. OK if table is NULL; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14285) * caller remains responsible for freeing the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14287) static void complete_rsm_map_table(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14288) struct rsm_map_table *rmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14290) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14292) if (rmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14293) /* write table to chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14294) for (i = 0; i < NUM_MAP_REGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14295) write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14297) /* enable RSM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14298) add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14302) /* Return true if a receive side mapping rule is enabled at this index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14303) static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14305) return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14306) }
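/*
 * Note: the non-zero test above works because add_rsm_rule() below
 * always sets the per-rule enable bit in RcvRsmCfg, while
 * clear_rsm_rule() writes the whole CSR back to zero.
 */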
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14309) * Add a receive side mapping rule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14311) static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14312) struct rsm_rule_data *rrd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14314) write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14315) (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14316) 1ull << rule_index | /* enable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14317) (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14318) write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14319) (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14320) (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14321) (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14322) (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14323) (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14324) (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14325) write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14326) (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14327) (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14328) (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14329) (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14333) * Clear a receive side mapping rule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14335) static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14337) write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14338) write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14339) write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14342) /* return the number of RSM map table entries that will be used for QOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14343) static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14344) unsigned int *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14346) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14347) unsigned int m, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14348) u8 max_by_vl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14350) /* is QOS active at all? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14351) if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14352) num_vls == 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14353) krcvqsset <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14354) goto no_qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14356) /* determine bits for qpn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14357) for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14358) if (krcvqs[i] > max_by_vl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14359) max_by_vl = krcvqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14360) if (max_by_vl > 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14361) goto no_qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14362) m = ilog2(__roundup_pow_of_two(max_by_vl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14364) /* determine bits for vl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14365) n = ilog2(__roundup_pow_of_two(num_vls));
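^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14365) * Example: krcvqs = {4, 4, 4, 4} with num_vls = 4 gives max_by_vl = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14365) * so m = 2 and n = 2, and the QOS rule will use 1 << (2 + 2) = 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14365) * map table entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14365) */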
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14367) /* reject if too much is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14368) if ((m + n) > 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14369) goto no_qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14371) if (mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14372) *mp = m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14373) if (np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14374) *np = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14376) return 1 << (m + n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14378) no_qos:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14379) if (mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14380) *mp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14381) if (np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14382) *np = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14383) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14386) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14387) * init_qos - init RX qos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14388) * @dd: device data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14389) * @rmt: RSM map table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14390) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14391) * This routine initializes Rule 0 and the RSM map table to implement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14392) * quality of service (qos).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14393) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14394) * If all of the limit tests succeed, qos is applied based on the array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14395) * interpretation of krcvqs where entry 0 is VL0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14396) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14397) * The number of vl bits (n) and the number of qpn bits (m) are computed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14398) * feed both the RSM map table and the single rule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14400) static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14402) struct rsm_rule_data rrd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14403) unsigned int qpns_per_vl, ctxt, i, qpn, n = 1, m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14404) unsigned int rmt_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14405) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14407) if (!rmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14408) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14409) rmt_entries = qos_rmt_entries(dd, &m, &n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14410) if (rmt_entries == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14411) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14412) qpns_per_vl = 1 << m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14414) /* enough room in the map table? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14415) rmt_entries = 1 << (m + n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14416) if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14417) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14419) /* add qos entries to the RSM map table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14420) for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14421) unsigned int tctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14423) for (qpn = 0, tctxt = ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14424) krcvqs[i] && qpn < qpns_per_vl; qpn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14425) unsigned int idx, regoff, regidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14427) /* generate the index the hardware will produce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14428) idx = rmt->used + ((qpn << n) ^ i);
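^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14428) * The low n bits of the index come from the VL (i) and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14428) * upper m bits from the QPN; the XOR acts as an OR since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14428) * (qpn << n) has zeros in its low n bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14428) */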
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14429) regoff = (idx % 8) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14430) regidx = idx / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14431) /* replace default with context number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14432) reg = rmt->map[regidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14433) reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14434) << regoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14435) reg |= (u64)(tctxt++) << regoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14436) rmt->map[regidx] = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14437) if (tctxt == ctxt + krcvqs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14438) tctxt = ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14440) ctxt += krcvqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14442)
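^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14442) * Build rule 0: match IB packets using the LRH/BTH and LRH/SC match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14442) * fields, and form the RSM map index from the SC select (n bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14442) * and the QPN select (m + n bits).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14442) */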
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14443) rrd.offset = rmt->used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14444) rrd.pkt_type = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14445) rrd.field1_off = LRH_BTH_MATCH_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14446) rrd.field2_off = LRH_SC_MATCH_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14447) rrd.index1_off = LRH_SC_SELECT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14448) rrd.index1_width = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14449) rrd.index2_off = QPN_SELECT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14450) rrd.index2_width = m + n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14451) rrd.mask1 = LRH_BTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14452) rrd.value1 = LRH_BTH_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14453) rrd.mask2 = LRH_SC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14454) rrd.value2 = LRH_SC_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14456) /* add rule 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14457) add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14459) /* mark RSM map entries as used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14460) rmt->used += rmt_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14461) /* map everything else to the mcast/err/vl15 context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14462) init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14463) dd->qos_shift = n + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14464) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14465) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14466) dd->qos_shift = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14467) init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14470) static void init_fecn_handling(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14471) struct rsm_map_table *rmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14473) struct rsm_rule_data rrd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14474) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14475) int i, idx, regoff, regidx, start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14476) u8 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14477) u32 total_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14479) if (HFI1_CAP_IS_KSET(TID_RDMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14480) /* Exclude context 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14481) start = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14482) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14483) start = dd->first_dyn_alloc_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14485) total_cnt = dd->num_rcv_contexts - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14487) /* there needs to be enough room in the map table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14488) if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14489) dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14490) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14494) * RSM will extract the destination context as an index into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14495) * map table. The destination contexts are a sequential block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14496) * in the range start...num_rcv_contexts-1 (inclusive).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14497) * Map entries are accessed as offset + extracted value. Adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14498) * the added offset so this sequence can be placed anywhere in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14499) * the table - as long as the entries themselves do not wrap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14500) * There are only enough bits in offset for the table size, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14501) * start with that to allow for a "negative" offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
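^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) * Worked example: with NUM_MAP_ENTRIES = 256 (32 regs x 8 entries),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) * rmt->used = 20 and start = 4 give offset = (u8)(256 + 20 - 4) = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) * context 4 then maps to entry (16 + 4) mod 256 = 20, the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) * free map entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) */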
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14505) for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14506) i++, idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14507) /* replace with identity mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14508) regoff = (idx % 8) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14509) regidx = idx / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14510) reg = rmt->map[regidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14511) reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14512) reg |= (u64)i << regoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14513) rmt->map[regidx] = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14516) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14517) * For RSM intercept of Expected FECN packets:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14518) * o packet type 0 - expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14519) * o match on F (bit 95), using select/match 1, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14520) * o match on SH (bit 133), using select/match 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14522) * Use index 1 to extract the 8-bit receive context from DestQP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14523) * (start at bit 64). Use that as the RSM map table index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14525) rrd.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14526) rrd.pkt_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14527) rrd.field1_off = 95;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14528) rrd.field2_off = 133;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14529) rrd.index1_off = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14530) rrd.index1_width = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14531) rrd.index2_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14532) rrd.index2_width = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14533) rrd.mask1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14534) rrd.value1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14535) rrd.mask2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14536) rrd.value2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14538) /* add rule 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14539) add_rsm_rule(dd, RSM_INS_FECN, &rrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14541) rmt->used += total_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14544) static inline bool hfi1_is_rmt_full(int start, int spare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14546) return (start + spare) > NUM_MAP_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14549) static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14551) u8 i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14552) u8 ctx_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14553) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14554) u32 regoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14555) int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14556) int ctxt_count = hfi1_netdev_ctxt_count(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14558) /* We already have contexts mapped in RMT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14559) if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14560) dd_dev_info(dd, "Contexts are already mapped in RMT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14561) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14564) if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14565) dd_dev_err(dd, "Not enough RMT entries, used = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14566) rmt_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14567) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14570) dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14571) rmt_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14572) rmt_start + NUM_NETDEV_MAP_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14574) /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14575) regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
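^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14575) * regoff addresses the 64-bit map register holding entry rmt_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14575) * each register is read-modify-written in turn below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14575) */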
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14576) reg = read_csr(dd, regoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14577) for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14578) /* Update map register with netdev context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14579) j = (rmt_start + i) % 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14580) reg &= ~(0xffllu << (j * 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14581) reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14582) /* Wrap up netdev ctx index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14583) ctx_id %= ctxt_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14584) /* Write back map register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14585) if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14586) dev_dbg(&(dd)->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14587) "RMT[%d] = 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14588) regoff - RCV_RSM_MAP_TABLE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14590) write_csr(dd, regoff, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14591) regoff += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14592) if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14593) reg = read_csr(dd, regoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14597) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14600) static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14601) int rule, struct rsm_rule_data *rrd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14603) if (!hfi1_netdev_update_rmt(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14604) dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14605) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14608) add_rsm_rule(dd, rule, rrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14609) add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14612) void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14615) * go through with the initialization only if this rule does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14616) * exist yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14618) if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
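^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) * This rule steers accelerated IPoIB (AIP) traffic: it matches IB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) * packets whose BTH DestQP carries the AIP value, and indexes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) * map table by DETH source-QPN bits so that flows are spread over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) * the NUM_NETDEV_MAP_ENTRIES netdev contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) */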
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14620) struct rsm_rule_data rrd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14621) .offset = rmt_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14622) .pkt_type = IB_PACKET_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14623) .field1_off = LRH_BTH_MATCH_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14624) .mask1 = LRH_BTH_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14625) .value1 = LRH_BTH_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14626) .field2_off = BTH_DESTQP_MATCH_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14627) .mask2 = BTH_DESTQP_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14628) .value2 = BTH_DESTQP_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14629) .index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14630) ilog2(NUM_NETDEV_MAP_ENTRIES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14631) .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14632) .index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14633) .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14634) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14636) hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14640) /* Initialize RSM for VNIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14641) void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14643) int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14644) struct rsm_rule_data rrd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14645) /* Add rule for vnic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14646) .offset = rmt_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14647) .pkt_type = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14648) /* Match 16B packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14649) .field1_off = L2_TYPE_MATCH_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14650) .mask1 = L2_TYPE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14651) .value1 = L2_16B_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14652) /* Match ETH L4 packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14653) .field2_off = L4_TYPE_MATCH_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14654) .mask2 = L4_16B_TYPE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14655) .value2 = L4_16B_ETH_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14656) /* Calc context from veswid and entropy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14657) .index1_off = L4_16B_HDR_VESWID_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14658) .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14659) .index2_off = L2_16B_ENTROPY_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14660) .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14661) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14663) hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14666) void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14668) clear_rsm_rule(dd, RSM_INS_VNIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14671) void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14673) /* only actually clear the rule if it's the last user asking to do so */
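^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14673) /* atomic_fetch_add_unless() also keeps the count at 0 if no rule was ever added */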
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14674) if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14675) clear_rsm_rule(dd, RSM_INS_AIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14678) static int init_rxe(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14680) struct rsm_map_table *rmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14681) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14683) /* enable all receive errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14684) write_csr(dd, RCV_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14686) rmt = alloc_rsm_map_table(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14687) if (!rmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14688) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14690) /* set up QOS, including the QPN map table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14691) init_qos(dd, rmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14692) init_fecn_handling(dd, rmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14693) complete_rsm_map_table(dd, rmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14694) /* record number of used rsm map entries for netdev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14695) hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14696) kfree(rmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14698) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14699) * make sure RcvCtrl.RcvWcb <= PCIe Device Control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14700) * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14701) * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14702) * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14703) * Max_Payload_Size set to its minimum of 128.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14704) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14705) * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14706) * (64 bytes). Max_Payload_Size is possibly modified upward in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14707) * tune_pcie_caps() which is called after this routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14710) /* Have 16 bytes (4DW) of bypass header available in header queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14711) val = read_csr(dd, RCV_BYPASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14712) val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14713) val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14714) RCV_BYPASS_HDR_SIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14715) write_csr(dd, RCV_BYPASS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14716) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14719) static void init_other(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14721) /* enable all CCE errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14722) write_csr(dd, CCE_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14723) /* enable *some* Misc errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14724) write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14725) /* enable all DC errors, except LCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14726) write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14727) write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14731) * Fill out the given AU table using the given CU. A CU is defined in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14732) * terms of AUs. The table is an encoding: given the index, how many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14733) * AUs does that represent?
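^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14733) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14733) * The encoding produced below is {0, 1, 2*CU, 4*CU, 8*CU, 16*CU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14733) * 32*CU, 64*CU} AUs for table indices 0 through 7.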
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14734) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14735) * NOTE: Assumes that the register layout is the same for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14736) * local and remote tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14738) static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14739) u32 csr0to3, u32 csr4to7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14741) write_csr(dd, csr0to3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14742) 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14743) 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14744) 2ull * cu <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14745) SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14746) 4ull * cu <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14747) SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14748) write_csr(dd, csr4to7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14749) 8ull * cu <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14750) SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14751) 16ull * cu <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14752) SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14753) 32ull * cu <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14754) SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14755) 64ull * cu <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14756) SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14759) static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14761) assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14762) SEND_CM_LOCAL_AU_TABLE4_TO7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14765) void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14767) assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14768) SEND_CM_REMOTE_AU_TABLE4_TO7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14771) static void init_txe(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14773) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14775) /* enable all PIO, SDMA, general, and Egress errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14776) write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14777) write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14778) write_csr(dd, SEND_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14779) write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14781) /* enable all per-context and per-SDMA engine errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14782) for (i = 0; i < chip_send_contexts(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14783) write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14784) for (i = 0; i < chip_sdma_engines(dd); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14785) write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14787) /* set the local CU to AU mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14788) assign_local_cm_au_table(dd, dd->vcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14791) * Set reasonable default for Credit Return Timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14792) * Don't set on Simulator - causes it to choke.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14794) if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14795) write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14798) int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14799) u16 jkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14801) u8 hw_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14802) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14804) if (!rcd || !rcd->sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14805) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14807) hw_ctxt = rcd->sc->hw_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14808) reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14809) ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14810) SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14811) /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14812) if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14813) reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14814) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14816) * Enable send-side J_KEY integrity check, unless this is A0 h/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14818) if (!is_ax(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14819) reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14820) reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14821) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14824) /* Enable J_KEY check on receive context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14825) reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14826) ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14827) RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14828) write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14833) int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14835) u8 hw_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14836) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14838) if (!rcd || !rcd->sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14839) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14841) hw_ctxt = rcd->sc->hw_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14842) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14844) * Disable send-side J_KEY integrity check, unless this is A0 h/w.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14845) * This check would not have been enabled for A0 h/w, see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14846) * hfi1_set_ctxt_jkey().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14848) if (!is_ax(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14849) reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14850) reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14851) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14853) /* Turn off the J_KEY on the receive side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14854) write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14859) int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14860) u16 pkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14862) u8 hw_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14863) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14865) if (!rcd || !rcd->sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14866) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14868) hw_ctxt = rcd->sc->hw_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14869) reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14870) SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14871) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14872) reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14873) reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
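^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14873) /* clearing the disallow bit permits KDETH packets now that a pkey is set */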
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14874) reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14875) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14877) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14880) int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14882) u8 hw_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14883) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14885) if (!ctxt || !ctxt->sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14886) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14888) hw_ctxt = ctxt->sc->hw_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14889) reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14890) reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14891) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14892) write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14898) * Start the clean up of the chip. Our clean up happens in multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14899) * stages and this is just the first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14901) void hfi1_start_cleanup(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14903) aspm_exit(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14904) free_cntrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14905) free_rcverr(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14906) finish_chip_resources(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14908)
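^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14908) * The two HFIs on an ASIC share a base GUID that differs only in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14908) * HFI index bit; mask that bit off to compare devices on one ASIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14908) */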
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14909) #define HFI_BASE_GUID(dev) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14910) ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14913) * Information can be shared between the two HFIs on the same ASIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14914) * in the same OS. This function finds the peer device and sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14915) * up a shared structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14916) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14917) static int init_asic_data(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14919) unsigned long index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14920) struct hfi1_devdata *peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14921) struct hfi1_asic_data *asic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14922) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14924) /* pre-allocate the asic structure in case we are the first device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14925) asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14926) if (!asic_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14927) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14929) xa_lock_irq(&hfi1_dev_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14930) /* Find our peer device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14931) xa_for_each(&hfi1_dev_table, index, peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14932) if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14933) dd->unit != peer->unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14935) }
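^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14935) /* peer is NULL here if the iteration completed without a match */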
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14937) if (peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14938) /* use already allocated structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14939) dd->asic_data = peer->asic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14940) kfree(asic_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14941) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14942) dd->asic_data = asic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14943) mutex_init(&dd->asic_data->asic_resource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14945) dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14946) xa_unlock_irq(&hfi1_dev_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14948) /* first one through - set up i2c devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14949) if (!peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14950) ret = set_up_i2c(dd, dd->asic_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14952) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14956) * Set dd->boardname. Use a generic name if a name is not returned from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14957) * EFI variable space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14958) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14959) * Return 0 on success, -ENOMEM if space could not be allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14961) static int obtain_boardname(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14963) /* generic board description */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14964) const char generic[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14965) "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14966) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14967) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14969) ret = read_hfi1_efi_var(dd, "description", &size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14970) (void **)&dd->boardname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14971) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14972) dd_dev_info(dd, "Board description not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14973) /* use generic description */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14974) dd->boardname = kstrdup(generic, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14975) if (!dd->boardname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14976) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14981) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14982) * Check the interrupt registers to make sure that they are mapped correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14983) * It is intended to help the user identify any mismapping by the VMM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14984) * when the driver is running in a VM. This function should only be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14985) * called before interrupts are set up properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14986) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14987) * Return 0 on success, -EINVAL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14989) static int check_int_registers(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14991) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14992) u64 all_bits = ~(u64)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14993) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14995) /* Clear CceIntMask[0] to avoid raising any interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14996) mask = read_csr(dd, CCE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14997) write_csr(dd, CCE_INT_MASK, 0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14998) reg = read_csr(dd, CCE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14999) if (reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15000) goto err_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15002) /* Clear all interrupt status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15003) write_csr(dd, CCE_INT_CLEAR, all_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15004) reg = read_csr(dd, CCE_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15005) if (reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15006) goto err_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15008) /* Set all interrupt status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15009) write_csr(dd, CCE_INT_FORCE, all_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15010) reg = read_csr(dd, CCE_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15011) if (reg != all_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15012) goto err_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15014) /* Restore the interrupt mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15015) write_csr(dd, CCE_INT_CLEAR, all_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15016) write_csr(dd, CCE_INT_MASK, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15019) err_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15020) write_csr(dd, CCE_INT_MASK, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15021) dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15022) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15025) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15026) * hfi1_init_dd() - Initialize most of the dd structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15027) * @dd: the dd device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15030) * This is global, and is called directly at init to set up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15031) * chip-specific function pointers for later use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15033) int hfi1_init_dd(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15035) struct pci_dev *pdev = dd->pcidev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15036) struct hfi1_pportdata *ppd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15037) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15038) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15039) static const char * const inames[] = { /* implementation names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15040) "RTL silicon",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15041) "RTL VCS simulation",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15042) "RTL FPGA emulation",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15043) "Functional simulator"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15044) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15045) struct pci_dev *parent = pdev->bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15046) u32 sdma_engines = chip_sdma_engines(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15048) ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15049) for (i = 0; i < dd->num_pports; i++, ppd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15050) int vl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15051) /* init common fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15052) hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15053) /* DC supports 4 link widths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15054) ppd->link_width_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15055) OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15056) OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15057) ppd->link_width_downgrade_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15058) ppd->link_width_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15059) /* start out enabling only 4X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15060) ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15061) ppd->link_width_downgrade_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15062) ppd->link_width_downgrade_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15063) /* link width active is 0 when link is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15064) /* link width downgrade active is 0 when link is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15066) if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15067) num_vls > HFI1_MAX_VLS_SUPPORTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15068) dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15069) num_vls, HFI1_MAX_VLS_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15070) num_vls = HFI1_MAX_VLS_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15072) ppd->vls_supported = num_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15073) ppd->vls_operational = ppd->vls_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15074) /* Set the default MTU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15075) for (vl = 0; vl < num_vls; vl++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15076) dd->vld[vl].mtu = hfi1_max_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15077) dd->vld[15].mtu = MAX_MAD_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15078) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15079) * Set the initial values to a reasonable default; they will be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15080) * for real when the link comes up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15082) ppd->overrun_threshold = 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15083) ppd->phy_error_threshold = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15084) ppd->port_crc_mode_enabled = link_crc_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15085) /* initialize supported LTP CRC mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15086) ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15087) /* initialize enabled LTP CRC mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15088) ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15089) /* start in offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15090) ppd->host_link_state = HLS_DN_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15091) init_vl_arb_caches(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15095) * Do remaining PCIe setup and save PCIe values in dd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15096) * Any error printing is already done by the init code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15097) * On return, we have the chip mapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15099) ret = hfi1_pcie_ddinit(dd, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15100) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15101) goto bail_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15103) /* Save PCI space registers to rewrite after device reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15104) ret = save_pci_variables(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15105) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15106) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15108) dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15109) & CCE_REVISION_CHIP_REV_MAJOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15110) dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15111) & CCE_REVISION_CHIP_REV_MINOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15113) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15114) * Check the interrupt register mapping if the driver has no access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15115) * to the upstream component.  In that case the driver is likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15116) * running in a VM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15118) if (!parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15119) ret = check_int_registers(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15120) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15121) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15125) * obtain the hardware ID - NOT related to unit, which is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15126) * software enumeration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15128) reg = read_csr(dd, CCE_REVISION2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15129) dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15130) & CCE_REVISION2_HFI_ID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15131) /* the smaller variable sizes truncate the unwanted upper bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15132) dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15133) dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15134) dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15135) dd->icode < ARRAY_SIZE(inames) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15136) inames[dd->icode] : "unknown", (int)dd->irev);
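/*
 * The implementation code distinguishes real RTL silicon from FPGA
 * emulation and simulator builds; later code keys off it, e.g. the
 * emulator link-width fixup and thermal_init() below.
 */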
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15138) /* speeds the hardware can support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15139) dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15140) /* speeds allowed to run at */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15141) dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15142) /* give a reasonable active value, will be set on link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15143) dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15145) /* fix up link widths for the _p emulator platform */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15146) ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15147) if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15148) ppd->link_width_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15149) ppd->link_width_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15150) ppd->link_width_downgrade_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15151) ppd->link_width_downgrade_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15152) OPA_LINK_WIDTH_1X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15154) /* ensure num_vls isn't larger than the number of sdma engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15155) if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15156) dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15157) num_vls, sdma_engines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15158) num_vls = sdma_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15159) ppd->vls_supported = sdma_engines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15160) ppd->vls_operational = ppd->vls_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15164) * Convert the ns parameter to the 64-cclock units used in the CSR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15165) * Limit the max if larger than the field holds. If timeout is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15166) * non-zero, then the calculated field will be at least 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15167) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15168) * Must be after icode is set up - the cclock rate depends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15169) * on knowing the hardware being used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15171) dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15172) if (dd->rcv_intr_timeout_csr >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15173) RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15174) dd->rcv_intr_timeout_csr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15175) RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15176) else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15177) dd->rcv_intr_timeout_csr = 1;
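/*
 * Worked example (hypothetical numbers): with a 500 MHz cclock, a
 * 640 ns timeout is 320 cclocks, giving a CSR field of 320 / 64 = 5.
 * A small non-zero timeout that rounds down to 0 is bumped to 1 so
 * "some timeout" never silently becomes "no timeout".
 */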
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15179) /* needs to be done before we look for the peer device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15180) read_guid(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15182) /* set up shared ASIC data with peer device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15183) ret = init_asic_data(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15184) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15185) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15187) /* obtain chip sizes, reset chip CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15188) ret = init_chip(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15189) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15190) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15192) /* read in the PCIe link speed information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15193) ret = pcie_speeds(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15194) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15195) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15197) /* call before get_platform_config(), after init_chip_resources() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15198) ret = eprom_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15199) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15200) goto bail_free_rcverr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15202) /* Needs to be called before hfi1_firmware_init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15203) get_platform_config(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15205) /* read in firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15206) ret = hfi1_firmware_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15207) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15208) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15211) * In general, the PCIe Gen3 transition must occur after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15212) * chip has been idled (so it won't initiate any PCIe transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15213) * e.g. an interrupt) and before the driver changes any registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15214) * (the transition will reset the registers).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15216) * In particular, place this call after:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15217) * - init_chip() - the chip will not initiate any PCIe transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15218) * - pcie_speeds() - reads the current link speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15219) * - hfi1_firmware_init() - the needed firmware is ready to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15220) * downloaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15222) ret = do_pcie_gen3_transition(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15223) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15224) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15227) * This should probably occur in hfi1_pcie_init(), but historically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15228) * occurs after the do_pcie_gen3_transition() code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15230) tune_pcie_caps(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15232) /* start setting dd values and adjusting CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15233) init_early_variables(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15235) parse_platform_config(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15237) ret = obtain_boardname(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15238) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15239) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15241) snprintf(dd->boardversion, BOARD_VERS_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15242) "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15243) HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15244) (u32)dd->majrev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15245) (u32)dd->minrev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15246) (dd->revision >> CCE_REVISION_SW_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15247) & CCE_REVISION_SW_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15249) /* alloc netdev data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15250) ret = hfi1_netdev_alloc(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15251) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15252) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15254) ret = set_up_context_variables(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15255) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15256) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15258) /* set initial RXE CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15259) ret = init_rxe(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15260) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15261) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15263) /* set initial TXE CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15264) init_txe(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15265) /* set initial non-RXE, non-TXE CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15266) init_other(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15267) /* set up KDETH QP prefix in both RX and TX CSRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15268) init_kdeth_qp(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15270) ret = hfi1_dev_affinity_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15271) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15272) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15274) /* send contexts must be set up before receive contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15275) ret = init_send_contexts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15276) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15277) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15279) ret = hfi1_create_kctxts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15280) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15281) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15284) * Initialize ASPM; this must be done after the Gen3 transition and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15285) * after setting up contexts, and before enabling interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15287) aspm_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15289) ret = init_pervl_scs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15290) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15291) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15293) /* sdma init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15294) for (i = 0; i < dd->num_pports; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15295) ret = sdma_init(dd, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15296) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15297) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15300) /* use contexts created by hfi1_create_kctxts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15301) ret = set_up_interrupts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15302) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15303) goto bail_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15305) ret = hfi1_comp_vectors_set_up(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15306) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15307) goto bail_clear_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15309) /* set up LCB access - must be after set_up_interrupts() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15310) init_lcb_access(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15313) * Serial number is created from the base guid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15314) * [27:24] = base guid [38:35]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15315) * [23: 0] = base guid [23: 0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15317) snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15318) (dd->base_guid & 0xFFFFFF) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15319) ((dd->base_guid >> 11) & 0xF000000));
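/*
 * Example (hypothetical GUID): with base_guid[38:35] = 0xA and
 * base_guid[23:0] = 0x456789, the serial becomes "0x0a456789".
 */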
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15321) dd->oui1 = dd->base_guid >> 56 & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15322) dd->oui2 = dd->base_guid >> 48 & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15323) dd->oui3 = dd->base_guid >> 40 & 0xFF;
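/*
 * In the EUI-64 layout the OUI occupies the top three bytes, so
 * oui1/oui2/oui3 are the GUID's most significant bytes in order.
 */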
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15325) ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15326) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15327) goto bail_clear_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15329) thermal_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15331) ret = init_cntrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15332) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15333) goto bail_clear_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15335) ret = init_rcverr(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15336) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15337) goto bail_free_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15339) init_completion(&dd->user_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15341) /* The user refcount starts at one to indicate an active device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15342) atomic_set(&dd->user_refcount, 1);
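/*
 * Each user context open takes an additional reference; the base
 * reference is dropped at shutdown, and user_comp is completed once
 * the count returns to zero (handled elsewhere in the driver).
 */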
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15344) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15346) bail_free_rcverr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15347) free_rcverr(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15348) bail_free_cntrs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15349) free_cntrs(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15350) bail_clear_intr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15351) hfi1_comp_vectors_clean_up(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15352) msix_clean_up_interrupts(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15353) bail_cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15354) hfi1_netdev_free(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15355) hfi1_pcie_ddcleanup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15356) bail_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15357) hfi1_free_devdata(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15358) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15359) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15362) static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15363) u32 dw_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15365) u32 delta_cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15366) u32 current_egress_rate = ppd->current_egress_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15367) /* rates here are in units of 10^6 bits/sec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15369) if (desired_egress_rate == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15370) return 0; /* shouldn't happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15372) if (desired_egress_rate >= current_egress_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15373) return 0; /* we can't help it go faster, only slower */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15375) delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15376) egress_cycles(dw_len * 4, current_egress_rate);
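/*
 * Illustration (made-up rates): a packet that needs 200 cycles at a
 * desired 25 Gb/s rate but only 50 cycles at the current 100 Gb/s
 * wire rate yields a 150-cycle pacing delay.
 */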
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15378) return (u16)delta_cycles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15381) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15382) * create_pbc - build a pbc for transmission
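* @ppd: the per-port data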
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15383) * @flags: special case flags or-ed in built pbc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15384) * @srate_mbs: static rate in Mb/s
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15385) * @vl: vl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15386) * @dw_len: dword length (header words + data words + pbc words)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15387) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15388) * Create a PBC with the given flags, rate, VL, and length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15390) * NOTE: The PBC created will not insert any HCRC - all callers but one are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15391) * for verbs, which does not use this PSM feature. The lone other caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15392) * is for the diagnostic interface which calls this if the user does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15393) * supply their own PBC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15395) u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15396) u32 dw_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15398) u64 pbc, delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15400) if (unlikely(srate_mbs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15401) delay = delay_cycles(ppd, srate_mbs, dw_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15403) pbc = flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15404) | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15405) | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15406) | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15407) | (dw_len & PBC_LENGTH_DWS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15408) << PBC_LENGTH_DWS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15410) return pbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15411) }
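
/*
 * Typical usage sketch (assuming the caller has already computed
 * pbc_flags, vl, and plen for the packet, and that the QP carries a
 * verbs static rate in Mb/s):
 *
 *	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
 *
 * Passing 0 for the static rate skips the pacing delay entirely.
 */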
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15413) #define SBUS_THERMAL 0x4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15414) #define SBUS_THERM_MONITOR_MODE 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15416) #define THERM_FAILURE(dev, ret, reason) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15417) dd_dev_err((dev), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15418) "Thermal sensor initialization failed: %s (%d)\n", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15419) (reason), (ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15421) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15422) * Initialize the thermal sensor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15423) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15424) * After initialization, enable polling of the thermal sensor through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15425) * the SBus interface.  For this to work, the SBus Master firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15426) * has to be loaded, because the HW polling logic uses SBus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15427) * interrupts, which the default firmware does not support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15428) * Otherwise, no data will be returned through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15429) * the ASIC_STS_THERM CSR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15431) static int thermal_init(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15433) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15435) if (dd->icode != ICODE_RTL_SILICON ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15436) check_chip_resource(dd, CR_THERM_INIT, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15437) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15439) ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15440) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15441) THERM_FAILURE(dd, ret, "Acquire SBus");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15442) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15445) dd_dev_info(dd, "Initializing thermal sensor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15446) /* Disable polling of thermal readings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15447) write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15448) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15449) /* Thermal Sensor Initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15450) /* Step 1: Reset the Thermal SBus Receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15451) ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15452) RESET_SBUS_RECEIVER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15453) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15454) THERM_FAILURE(dd, ret, "Bus Reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15455) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15457) /* Step 2: Set Reset bit in Thermal block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15458) ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15459) WRITE_SBUS_RECEIVER, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15460) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15461) THERM_FAILURE(dd, ret, "Therm Block Reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15462) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15464) /* Step 3: Write clock divider value 0x32 (= 50: 100 MHz / 50 -> 2 MHz) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15465) ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15466) WRITE_SBUS_RECEIVER, 0x32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15467) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15468) THERM_FAILURE(dd, ret, "Write Clock Div");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15469) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15471) /* Step 4: Select temperature mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15472) ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15473) WRITE_SBUS_RECEIVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15474) SBUS_THERM_MONITOR_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15475) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15476) THERM_FAILURE(dd, ret, "Write Mode Sel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15477) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15479) /* Step 5: De-assert block reset and start conversion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15480) ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15481) WRITE_SBUS_RECEIVER, 0x2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15482) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15483) THERM_FAILURE(dd, ret, "Write Reset Deassert");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15484) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15486) /* Step 5.1: Wait for first conversion (21.5ms per spec) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15487) msleep(22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15489) /* Enable polling of thermal readings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15490) write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15492) /* Set initialized flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15493) ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15494) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15495) THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15497) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15498) release_chip_resource(dd, CR_SBUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15499) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15502) static void handle_temp_err(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15504) struct hfi1_pportdata *ppd = &dd->pport[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15505) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15506) * Thermal Critical Interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15507) * Put the device into forced freeze mode, take the link down to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15508) * offline, and put the DC into reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15510) dd_dev_emerg(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15511) "Critical temperature reached! Forcing device into freeze mode!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15512) dd->flags |= HFI1_FORCED_FREEZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15513) start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15515) * Shut DC down as much and as quickly as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15517) * Step 1: Take the link down to OFFLINE. This will cause the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15518) * 8051 to put the Serdes in reset. However, we don't want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15519) * go through the entire link state machine since we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15520) * shut down ASAP.  Furthermore, this is not a graceful shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15521) * but rather an attempt to save the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15522) * The code below is almost the same as quiet_serdes(), but avoids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15523) * all the extra work and the sleeps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15525) ppd->driver_link_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15526) ppd->link_enabled = 0;
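/*
 * The argument encodes the linkdown reason in the upper byte and the
 * target physical link state in the lower byte.
 */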
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15527) set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15528) PLS_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15529) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15530) * Step 2: Shutdown LCB and 8051
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15531) * After shutdown, do not restore DC_CFG_RESET value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15533) dc_shutdown(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15534) }