^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright(c) 2015 - 2017 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This file is provided under a dual BSD/GPLv2 license. When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * - Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * - Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * - Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include "hfi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Make it easy to toggle the firmware file names and whether they get loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * by editing the following. This may be something we do while in development
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * but not necessarily something a user would ever need to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) static uint fw_8051_load = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static uint fw_fabric_serdes_load = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) static uint fw_pcie_serdes_load = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static uint fw_sbus_load = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /* Firmware file names get set in hfi1_firmware_init() based on the above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static char *fw_8051_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) static char *fw_fabric_serdes_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) static char *fw_sbus_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) static char *fw_pcie_serdes_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define SBUS_MAX_POLL_COUNT 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define SBUS_COUNTER(reg, name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) (((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)
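
/*
 * Example (editor's sketch, not part of the driver flow): SBUS_COUNTER()
 * extracts one named counter field from a raw read of the SBus counters
 * CSR. The register and counter names below are assumptions for
 * illustration; use whichever *_CNT_SHIFT/_CNT_MASK pairs the register
 * header actually defines.
 *
 *	u64 counters = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
 *	u32 executed = SBUS_COUNTER(counters, EXECUTE);
 */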
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * Firmware security header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) struct css_header {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) u32 module_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) u32 header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) u32 header_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) u32 module_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) u32 module_vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) u32 date; /* BCD yyyymmdd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) u32 size; /* in DWORDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) u32 key_size; /* in DWORDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) u32 modulus_size; /* in DWORDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) u32 exponent_size; /* in DWORDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) u32 reserved[22];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /* expected field values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define CSS_MODULE_TYPE 0x00000006
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define CSS_HEADER_LEN 0x000000a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define CSS_HEADER_VERSION 0x00010000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define CSS_MODULE_VENDOR 0x00008086
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define KEY_SIZE 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define MU_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define EXPONENT_SIZE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /* size of platform configuration partition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* size of the platform configuration file when encoded in format version 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /* the file itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct firmware_file {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) struct css_header css_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) u8 modulus[KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) u8 exponent[EXPONENT_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) u8 signature[KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) u8 firmware[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct augmented_firmware_file {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) struct css_header css_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) u8 modulus[KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) u8 exponent[EXPONENT_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) u8 signature[KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) u8 r2[KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) u8 mu[MU_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) u8 firmware[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) /* augmented file size difference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) sizeof(struct firmware_file))
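
/*
 * Worked example (editor's note): with KEY_SIZE = 256 and MU_SIZE = 8,
 * AUGMENT_SIZE is just the extra r2[] and mu[] blocks, 256 + 8 = 264
 * bytes. obtain_one_firmware() uses this fixed difference between the
 * CSS header's size field (css->size * 4 bytes) and the real file size
 * to tell the two file flavors apart:
 *
 *	css->size * 4 == fw->size                  -> plain firmware_file
 *	css->size * 4 + AUGMENT_SIZE == fw->size   -> augmented_firmware_file
 */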
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct firmware_details {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /* Linux core piece */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) struct css_header *css_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) u8 *firmware_ptr; /* pointer to binary data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) u32 firmware_len; /* length in bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) u8 *modulus; /* pointer to the modulus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) u8 *exponent; /* pointer to the exponent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) u8 *signature; /* pointer to the signature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) u8 *r2; /* pointer to r2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) u8 *mu; /* pointer to mu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct augmented_firmware_file dummy_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * The mutex protects fw_state, fw_err, and all of the firmware_details
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * variables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) static DEFINE_MUTEX(fw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) enum fw_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) FW_EMPTY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) FW_TRY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) FW_FINAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) FW_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) static enum fw_state fw_state = FW_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static int fw_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static struct firmware_details fw_8051;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static struct firmware_details fw_fabric;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) static struct firmware_details fw_pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) static struct firmware_details fw_sbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) /* flags for turn_off_spicos() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define SPICO_SBUS 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #define SPICO_FABRIC 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #define ENABLE_SPICO_SMASK 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) /* security block commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) #define RSA_CMD_INIT 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #define RSA_CMD_START 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /* security block status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define RSA_STATUS_IDLE 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #define RSA_STATUS_ACTIVE 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #define RSA_STATUS_DONE 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #define RSA_STATUS_FAILED 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /* RSA engine timeout, in ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) #define RSA_ENGINE_TIMEOUT 100 /* ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /* hardware mutex timeout, in ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) #define HM_TIMEOUT 10 /* ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /* 8051 memory access timeout, in us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) #define DC8051_ACCESS_TIMEOUT 100 /* us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /* the number of fabric SerDes on the SBus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #define NUM_FABRIC_SERDES 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) /* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) #define SBUS_READ_COMPLETE 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) /* SBus fabric SerDes addresses, one set per HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) { 0x01, 0x02, 0x03, 0x04 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) { 0x28, 0x29, 0x2a, 0x2b }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) /* SBus PCIe SerDes addresses, one set per HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) { 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) { 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /* SBus PCIe PCS addresses, one set per HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) { 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) { 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /* SBus fabric SerDes broadcast addresses, one per HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) static const u8 all_fabric_serdes_broadcast = 0xe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) /* SBus PCIe SerDes broadcast addresses, one per HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) static const u8 all_pcie_serdes_broadcast = 0xe0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) SYSTEM_TABLE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) PORT_TABLE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) RX_PRESET_TABLE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) TX_PRESET_TABLE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) QSFP_ATTEN_TABLE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) VARIABLE_SETTINGS_TABLE_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /* forwards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static void dispose_one_firmware(struct firmware_details *fdet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) struct firmware_details *fdet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) static void dump_fw_version(struct hfi1_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * Read a single 64-bit value from 8051 data memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * Expects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * o caller to have already set up data read, no auto increment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * o caller to turn off read enable when finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * The address argument is a byte offset. Bits 0:2 in the address are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * ignored - i.e. the hardware will always do aligned 8-byte reads as if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * the lower bits are zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * Return 0 on success, -ENXIO on a read error (timeout).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) /* step 1: set the address, clear enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) /* step 2: enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /* wait until ACCESS_COMPLETED is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) if (count > DC8051_ACCESS_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) dd_dev_err(dd, "timeout reading 8051 data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) ndelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) /* gather the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) *result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * Read 8051 data starting at addr, for len bytes. Will read in 8-byte chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * Return 0 on success, -errno on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) u32 done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) spin_lock_irqsave(&dd->dc8051_memlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) /* data read set-up, no auto-increment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) for (done = 0; done < len; addr += 8, done += 8, result++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) ret = __read_8051_data(dd, addr, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) /* turn off read enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
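
/*
 * Usage sketch (editor's note, not a call made in this file): len is in
 * bytes but data is returned in 8-byte chunks, so the caller supplies a
 * u64 buffer sized for the rounded-up length, e.g.:
 *
 *	u64 buf[DIV_ROUND_UP(LEN, 8)];
 *	int ret = read_8051_data(dd, ADDR, LEN, buf);
 *
 * LEN and ADDR are placeholders; ADDR is a byte offset into 8051 data
 * memory as described above __read_8051_data().
 */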
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * Write data or code to the 8051 code or data RAM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) const u8 *data, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) int aligned, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /* check alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) aligned = ((unsigned long)data & 0x7) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) /* write set-up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) | DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) | DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /* write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) for (offset = 0; offset < len; offset += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) int bytes = len - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (bytes < 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) memcpy(&reg, &data[offset], bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) } else if (aligned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) reg = *(u64 *)&data[offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) memcpy(&reg, &data[offset], 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) /* wait until ACCESS_COMPLETED is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) if (count > DC8051_ACCESS_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) dd_dev_err(dd, "timeout writing 8051 data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) /* turn off write access, auto increment (also sets to data access) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
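
/*
 * Usage sketch (editor's note): loading the 8051 firmware image amounts
 * to writing the whole payload to code RAM starting at address 0,
 * roughly:
 *
 *	ret = write_8051(dd, 1, 0, fdet->firmware_ptr, fdet->firmware_len);
 *
 * where code = 1 selects code RAM via RAM_SEL in the access setup CSR,
 * exactly as handled at the top of write_8051() above.
 */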
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) /* return 0 if values match, non-zero and complain otherwise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) static int invalid_header(struct hfi1_devdata *dd, const char *what,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) u32 actual, u32 expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (actual == expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) what, expected, actual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * Verify that the static fields in the CSS header match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /* verify CSS header fields (most sizes are in DW, so add /4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (invalid_header(dd, "module_type", css->module_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) CSS_MODULE_TYPE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) invalid_header(dd, "header_len", css->header_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) (sizeof(struct firmware_file) / 4)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) invalid_header(dd, "header_version", css->header_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) CSS_HEADER_VERSION) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) invalid_header(dd, "module_vendor", css->module_vendor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) CSS_MODULE_VENDOR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) invalid_header(dd, "modulus_size", css->modulus_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) KEY_SIZE / 4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) invalid_header(dd, "exponent_size", css->exponent_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) EXPONENT_SIZE / 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
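
/*
 * Worked example (editor's note): the header_len check above expects
 * sizeof(struct firmware_file) / 4 DWORDs. With a 128-byte css_header
 * (32 u32 fields, 22 of them reserved), a 256-byte modulus, a 4-byte
 * exponent and a 256-byte signature, that is (128 + 256 + 4 + 256) / 4
 * = 161 = 0xa1, which matches CSS_HEADER_LEN.
 */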
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * Make sure there are at least some bytes after the prefix.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) static int payload_check(struct hfi1_devdata *dd, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) long file_size, long prefix_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) /* make sure we have some payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (prefix_size >= file_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) name, file_size, prefix_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) * Request the firmware from the system. Extract the pieces and fill in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) * fdet. If successful, the caller will need to call dispose_one_firmware().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) * Returns 0 on success, -ERRNO on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct firmware_details *fdet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) struct css_header *css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) memset(fdet, 0, sizeof(*fdet));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) /* verify the firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (fdet->fw->size < sizeof(struct css_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) css = (struct css_header *)fdet->fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) hfi1_cdbg(FIRMWARE, "CSS structure:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) css->header_len, 4 * css->header_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) hfi1_cdbg(FIRMWARE, " date 0x%x", css->date);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) css->size, 4 * css->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) css->key_size, 4 * css->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) css->modulus_size, 4 * css->modulus_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) css->exponent_size, 4 * css->exponent_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) fdet->fw->size - sizeof(struct firmware_file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * If the file does not have a valid CSS header, fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * Otherwise, check the CSS size field for an expected size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * The augmented file has r2 and mu inserted after the header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * was generated, so there will be a known difference between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * the CSS header size and the actual file size. Use this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * difference to identify an augmented file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * Note: css->size is in DWORDs, multiply by 4 to get bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) ret = verify_css_header(dd, css);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) } else if ((css->size * 4) == fdet->fw->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) /* non-augmented firmware file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) struct firmware_file *ff = (struct firmware_file *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) fdet->fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) /* make sure there are bytes in the payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) ret = payload_check(dd, name, fdet->fw->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) sizeof(struct firmware_file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) fdet->css_header = css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) fdet->modulus = ff->modulus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) fdet->exponent = ff->exponent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) fdet->signature = ff->signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) fdet->mu = fdet->dummy_header.mu; /* use dummy space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) fdet->firmware_ptr = ff->firmware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) fdet->firmware_len = fdet->fw->size -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) sizeof(struct firmware_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * Header does not include r2 and mu - generate here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) * For now, fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) /* augmented firmware file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) struct augmented_firmware_file *aff =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) (struct augmented_firmware_file *)fdet->fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /* make sure there are bytes in the payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) ret = payload_check(dd, name, fdet->fw->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) sizeof(struct augmented_firmware_file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) fdet->css_header = css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) fdet->modulus = aff->modulus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) fdet->exponent = aff->exponent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) fdet->signature = aff->signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) fdet->r2 = aff->r2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) fdet->mu = aff->mu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) fdet->firmware_ptr = aff->firmware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) fdet->firmware_len = fdet->fw->size -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) sizeof(struct augmented_firmware_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /* css->size check failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) fdet->fw->size / 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) (fdet->fw->size - AUGMENT_SIZE) / 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) css->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) /* if returning an error, clean up after ourselves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) dispose_one_firmware(fdet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) static void dispose_one_firmware(struct firmware_details *fdet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) release_firmware(fdet->fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) /* erase all previous information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) memset(fdet, 0, sizeof(*fdet));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
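
/*
 * Pairing sketch (editor's note): obtain_one_firmware() and
 * dispose_one_firmware() bracket the lifetime of a single image:
 *
 *	struct firmware_details fdet;
 *
 *	if (obtain_one_firmware(dd, fw_sbus_name, &fdet) == 0) {
 *		... use fdet.firmware_ptr / fdet.firmware_len ...
 *		dispose_one_firmware(&fdet);
 *	}
 *
 * __obtain_firmware() below does this for the four images, keeping the
 * results in the static fw_* details until dispose_firmware() runs.
 */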
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * Obtain the 4 firmwares from the OS. All must be obtained at once or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) * at all. If called with the firmware state in FW_TRY, use alternate names.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) * On exit, this routine will have set the firmware state to one of FW_TRY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) * FW_FINAL, or FW_ERR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) * Must be holding fw_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) static void __obtain_firmware(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (fw_state == FW_FINAL) /* nothing more to obtain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (fw_state == FW_ERR) /* already in error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* fw_state is FW_EMPTY or FW_TRY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (fw_state == FW_TRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * We tried the original and it failed. Move to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * alternate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) dd_dev_warn(dd, "using alternate firmware names\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * Let others run. Some systems, when missing firmware, do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * something that holds for 30 seconds. If we do that twice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * in a row it triggers a task blocked warning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (fw_8051_load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) dispose_one_firmware(&fw_8051);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (fw_fabric_serdes_load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) dispose_one_firmware(&fw_fabric);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (fw_sbus_load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) dispose_one_firmware(&fw_sbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (fw_pcie_serdes_load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) dispose_one_firmware(&fw_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) fw_8051_name = ALT_FW_8051_NAME_ASIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) fw_sbus_name = ALT_FW_SBUS_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * Add a delay before obtaining and loading debug firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * Authorization will fail if the delay between firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) * authorization events is shorter than 50us. Delay for 100us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) * to leave a safe margin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) usleep_range(100, 120);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (fw_sbus_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if (fw_pcie_serdes_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (fw_fabric_serdes_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) err = obtain_one_firmware(dd, fw_fabric_serdes_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) &fw_fabric);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (fw_8051_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /* oops, had problems obtaining a firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /* retry with alternate (RTL only) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) fw_state = FW_TRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) dd_dev_err(dd, "unable to obtain working firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) fw_state = FW_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) fw_err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (fw_state == FW_EMPTY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) fw_state = FW_TRY; /* may retry later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) fw_state = FW_FINAL; /* cannot try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * Called by all HFIs when loading their firmware - i.e. device probe time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * The first one will do the actual firmware load. Use a mutex to resolve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * any possible race condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * The call to this routine cannot be moved to driver load because the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * call request_firmware() requires a device which is only available after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * the first device probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) static int obtain_firmware(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) mutex_lock(&fw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /* 40s timeout, since missing firmware can stall some systems for a long time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) timeout = jiffies + msecs_to_jiffies(40000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) while (fw_state == FW_TRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * Another device is trying the firmware. Wait until it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * decides what works (or not).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) /* waited too long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) dd_dev_err(dd, "Timeout waiting for firmware try");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) fw_state = FW_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) fw_err = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) mutex_unlock(&fw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) msleep(20); /* arbitrary delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) mutex_lock(&fw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* not in FW_TRY state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (fw_state == FW_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) __obtain_firmware(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) mutex_unlock(&fw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return fw_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * Called when the driver unloads. The timing is asymmetric with its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * counterpart, obtain_firmware(). If called at device remove time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * then it is conceivable that another device could probe while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * firmware is being disposed. The mutexes can be moved to do that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * safely, but then the firmware would be requested from the OS multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * No mutex is needed as the driver is unloading and there cannot be any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * other callers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) void dispose_firmware(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) dispose_one_firmware(&fw_8051);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) dispose_one_firmware(&fw_fabric);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) dispose_one_firmware(&fw_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) dispose_one_firmware(&fw_sbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* retain the error state, otherwise revert to empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (fw_state != FW_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) fw_state = FW_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * Called with the result of a firmware download.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * Return 1 to retry loading the firmware, 0 to stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static int retry_firmware(struct hfi1_devdata *dd, int load_result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) int retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) mutex_lock(&fw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (load_result == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * The load succeeded, so expect all others to do the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * Do not retry again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (fw_state == FW_TRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) fw_state = FW_FINAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) retry = 0; /* do NOT retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) } else if (fw_state == FW_TRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* load failed, obtain alternate firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) __obtain_firmware(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) retry = (fw_state == FW_FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* else in FW_FINAL or FW_ERR, no retry in either case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) mutex_unlock(&fw_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) return retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
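
/*
 * Illustrative call pattern for retry_firmware() - a sketch of how the
 * download helpers in this file are meant to be driven, not a verbatim
 * copy of a specific caller:
 *
 *	do {
 *		err = load_fabric_serdes_firmware(dd, &fw_fabric);
 *	} while (retry_firmware(dd, err));
 *
 * retry_firmware() decides, under fw_mutex, whether a failed download is
 * worth repeating with the alternate firmware.
 */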
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * Write a block of data to a given array CSR. All calls will be in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * multiples of 8 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static void write_rsa_data(struct hfi1_devdata *dd, int what,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) const u8 *data, int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) int qw_size = nbytes / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (((unsigned long)data & 0x7) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) u64 *ptr = (u64 *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) for (i = 0; i < qw_size; i++, ptr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) write_csr(dd, what + (8 * i), *ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* not aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) for (i = 0; i < qw_size; i++, data += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) memcpy(&value, data, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) write_csr(dd, what + (8 * i), value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
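
/*
 * Worked example: the signature write in run_rsa() below,
 * write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE), emits
 * KEY_SIZE/8 quadword writes at MISC_CFG_RSA_SIGNATURE, +8, +16, ...,
 * taking the aligned path or the memcpy path depending on the buffer
 * address.
 */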
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * Write a block of data to a given CSR as a stream of writes. All calls will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * be in multiples of 8 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) const u8 *data, int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u64 *ptr = (u64 *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int qw_size = nbytes / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) for (; qw_size > 0; qw_size--, ptr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) write_csr(dd, what, *ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * Download the signature and start the RSA mechanism. Wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * RSA_ENGINE_TIMEOUT before giving up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) static int run_rsa(struct hfi1_devdata *dd, const char *who,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) const u8 *signature)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /* write the signature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* initialize RSA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * Make sure the engine is idle and insert a delay between the two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * writes to MISC_CFG_RSA_CMD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) status = (read_csr(dd, MISC_CFG_FW_CTRL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (status != RSA_STATUS_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dd_dev_err(dd, "%s security engine not idle - giving up\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) who);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* start RSA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * Look for the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * The RSA engine is hooked up to two MISC errors. The driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * masks these errors as they do not respond to the standard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * error "clear down" mechanism. Look for these errors here and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * clear them when possible. This routine will exit with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * errors of the current run still set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * MISC_FW_AUTH_FAILED_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * Firmware authorization failed. This can be cleared by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * re-initializing the RSA engine, then clearing the status bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * Do not re-init the RSA engine immediately after a successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * run - this will reset the current authorization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * MISC_KEY_MISMATCH_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * Key does not match. The only way to clear this is to load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * a matching key then clear the status bit. If this error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * is raised, it will persist outside of this routine until a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * matching key is loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) status = (read_csr(dd, MISC_CFG_FW_CTRL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (status == RSA_STATUS_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* should not happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dd_dev_err(dd, "%s firmware security bad idle state\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) who);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) } else if (status == RSA_STATUS_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* finished successfully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) } else if (status == RSA_STATUS_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* finished unsuccessfully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* else still active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * Timed out while active. We can't reset the engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * if it is stuck active, but fall through to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * error reporting below to see which error bits are set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dd_dev_err(dd, "%s firmware security time out\n", who);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * Arrive here on success or failure. Clear all RSA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * errors. Errors from the current run will stick - the RSA logic is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * keeping the error high. Errors from previous runs will clear - the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * RSA logic is no longer keeping the error high.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) write_csr(dd, MISC_ERR_CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * All that is left are the errors from the current run. Print warnings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * for any authorization failure details. Firmware authorization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * can be retried, so these are only warnings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) reg = read_csr(dd, MISC_ERR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dd_dev_warn(dd, "%s firmware authorization failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) who);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dd_dev_warn(dd, "%s firmware key mismatch\n", who);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static void load_security_variables(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct firmware_details *fdet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* Security variables a. Write the modulus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* Security variables b. Write the r2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* Security variables c. Write the mu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* Security variables d. Write the header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) (u8 *)fdet->css_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) sizeof(struct css_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /* return the 8051 firmware state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static inline u32 get_firmware_state(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * Wait until the firmware is up and ready to take host requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * Return 0 on success, -ETIMEDOUT on timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* in the simulator, the fake 8051 is always ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) timeout = msecs_to_jiffies(mstimeout) + jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (get_firmware_state(dd) == 0xa0) /* ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (time_after(jiffies, timeout)) /* timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) usleep_range(1950, 2050); /* sleep 2ms-ish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
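
/*
 * Example (see load_8051_firmware() below): after releasing the 8051 from
 * reset, the caller polls with
 *
 *	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
 *
 * and treats a non-zero return as a start timeout.
 */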
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * Load the 8051 firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static int load_8051_firmware(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct firmware_details *fdet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) u8 ver_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) u8 ver_minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) u8 ver_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * DC Reset sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * Load DC 8051 firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * DC reset step 1: Reset DC8051
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) reg = DC_DC8051_CFG_RST_M8051W_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) | DC_DC8051_CFG_RST_CRAM_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) | DC_DC8051_CFG_RST_DRAM_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) | DC_DC8051_CFG_RST_IRAM_SMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) | DC_DC8051_CFG_RST_SFR_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) write_csr(dd, DC_DC8051_CFG_RST, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * DC reset step 2 (optional): Load 8051 data memory with link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * DC reset step 3: Load DC8051 firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* release all but the core reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) reg = DC_DC8051_CFG_RST_M8051W_SMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) write_csr(dd, DC_DC8051_CFG_RST, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* Firmware load step 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) load_security_variables(dd, fdet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * Firmware load step 2. Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) write_csr(dd, MISC_CFG_FW_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* Firmware load steps 3-5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) fdet->firmware_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * DC reset step 4. Host starts the DC8051 firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * Firmware load step 6. Set MISC_CFG_FW_CTRL.FW_8051_LOADED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* Firmware load steps 7-10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ret = run_rsa(dd, "8051", fdet->signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* clear all reset bits, releasing the 8051 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) write_csr(dd, DC_DC8051_CFG_RST, 0ull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * DC reset step 5. Wait for firmware to be ready to accept host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ret = wait_fm_ready(dd, TIMEOUT_8051_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (ret) { /* timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) get_firmware_state(dd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) (int)ver_major, (int)ver_minor, (int)ver_patch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (ret != HCMD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) "Failed to set host interface version, return 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * Write the SBus request register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * No need for masking - the arguments are sized exactly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) void sbus_request(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) write_csr(dd, ASIC_CFG_SBUS_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ((u64)receiver_addr <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
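
/*
 * Example usage (mirroring turn_off_spicos() below): a broadcast write
 * that disables the SBus SPICO looks like
 *
 *	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01, WRITE_SBUS_RECEIVER,
 *		     0x00000040);
 *
 * All four arguments are packed, unmasked, into the single 64-bit request
 * CSR written above.
 */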
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * Read a value from the SBus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * Requires the caller to be in fast mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) u32 data_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) int success = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) u32 result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) u32 result_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) for (retries = 0; retries < 100; retries++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) usleep_range(1000, 1200); /* arbitrary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (result_code != SBUS_READ_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) success = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (!success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) result_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * Turn off the SBus and fabric serdes spicos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * + Must be called with SBus fast mode turned on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * + Must be called after fabric serdes broadcast is set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * when using MISC_CFG_FW_CTRL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* only needed on A0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!is_ax(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) dd_dev_info(dd, "Turning off spicos:%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) flags & SPICO_SBUS ? " SBus" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) flags & SPICO_FABRIC ? " fabric" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* disable SBus spico */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (flags & SPICO_SBUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) WRITE_SBUS_RECEIVER, 0x00000040);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* disable the fabric serdes spicos */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (flags & SPICO_FABRIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 0x07, WRITE_SBUS_RECEIVER, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) write_csr(dd, MISC_CFG_FW_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * Reset all of the fabric serdes for this HFI in preparation to take the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * link to Polling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * To do a reset, we need to write to the serdes registers. Unfortunately,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * the fabric serdes download to the other HFI on the ASIC will have turned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * off the firmware validation on this HFI. This means we can't write to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * registers to reset the serdes. Work around this by performing a complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * re-download and validation of the fabric serdes firmware. This, as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * by-product, will reset the serdes. NOTE: the re-download requires that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * the 8051 be in the Offline state. I.e. not actively trying to use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * serdes. This routine is called at the point where the link is Offline and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * is getting ready to go to Polling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) void fabric_serdes_reset(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (!fw_fabric_serdes_load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) set_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (is_ax(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /* A0 serdes do not work with a re-download */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) u8 ra = fabric_serdes_broadcast[dd->hfi1_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* place SerDes in reset and disable SPICO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* wait 100 refclk cycles @ 156.25MHz => 640ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* remove SerDes reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* turn SPICO enable on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) turn_off_spicos(dd, SPICO_FABRIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * No need for firmware retry - what to download has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * been decided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * No need to pay attention to the load return - the only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * failure is a validation failure, which has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * checked by the initial download.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) (void)load_fabric_serdes_firmware(dd, &fw_fabric);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) clear_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) release_chip_resource(dd, CR_SBUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* Access to the SBus in this routine should probably be serialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int sbus_request_slow(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) u64 reg, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* make sure fast mode is clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) clear_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) sbus_request(dd, receiver_addr, data_addr, command, data_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* Wait for both DONE and RCV_DATA_VALID to go high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (count++ >= SBUS_MAX_POLL_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * If the loop has timed out, we are OK if DONE bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * is set and RCV_DATA_VALID and EXECUTE counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * are the same. If not, we cannot proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) SBUS_COUNTER(counts, EXECUTE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* Wait for DONE to clear after EXECUTE is cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (count++ >= SBUS_MAX_POLL_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct firmware_details *fdet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) dd_dev_info(dd, "Downloading fabric firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /* step 1: load security variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) load_security_variables(dd, fdet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* step 2: place SerDes in reset and disable SPICO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /* wait 100 refclk cycles @ 156.25MHz => 640ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /* step 3: remove SerDes reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* step 4: assert IMEM override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* step 5: download SerDes machine code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) for (i = 0; i < fdet->firmware_len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) *(u32 *)&fdet->firmware_ptr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /* step 6: IMEM override off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* step 7: turn ECC on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /* steps 8-11: run the RSA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) err = run_rsa(dd, "fabric serdes", fdet->signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* step 12: turn SPICO enable on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* step 13: enable core hardware interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static int load_sbus_firmware(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct firmware_details *fdet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) dd_dev_info(dd, "Downloading SBus firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /* step 1: load security variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) load_security_variables(dd, fdet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* step 2: place SPICO into reset and enable off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* step 4: set starting IMEM address for burst download */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /* step 5: download the SBus Master machine code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) for (i = 0; i < fdet->firmware_len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) *(u32 *)&fdet->firmware_ptr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* step 6: set IMEM_CNTL_EN off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* step 7: turn ECC on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* steps 8-11: run the RSA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) err = run_rsa(dd, "SBus", fdet->signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* step 12: set SPICO_ENABLE on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct firmware_details *fdet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) dd_dev_info(dd, "Downloading PCIe firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* step 1: load security variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) load_security_variables(dd, fdet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /* step 2: assert single step (halts the SBus Master spico) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /* step 3: enable XDMEM access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* step 4: load firmware into SBus Master XDMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * NOTE: the dmem address, write_en, and wdata are all pre-packed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * we only need to pick up the bytes and write them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) for (i = 0; i < fdet->firmware_len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) *(u32 *)&fdet->firmware_ptr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* step 5: disable XDMEM access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /* step 6: allow SBus Spico to run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * steps 7-11: run RSA, if it succeeds, firmware is available to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * be swapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return run_rsa(dd, "PCIe serdes", fdet->signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * Set the given broadcast values on the given list of devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) const u8 *addrs, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) while (--count >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * defaults for everything else. Do not read-modify-write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * per instruction from the manufacturer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * Register 0xfd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * bits what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * ----- ---------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * 0 IGNORE_BROADCAST (default 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * 11:4 BROADCAST_GROUP_1 (default 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * 23:16 BROADCAST_GROUP_2 (default 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) (u32)bg1 << 4 | (u32)bg2 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
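
/*
 * Worked example with illustrative (not driver-chosen) group numbers:
 * bg1 = 0x1 and bg2 = 0x2 write (0x1 << 4) | (0x2 << 16) = 0x00020010 to
 * register 0xfd of each listed receiver, i.e. BROADCAST_GROUP_1 = 1 and
 * BROADCAST_GROUP_2 = 2 with IGNORE_BROADCAST left at its default of 0.
 */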
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) int acquire_hw_mutex(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) int try = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) u8 mask = 1 << dd->hfi1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (user == mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) "Hardware mutex already acquired, mutex mask %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) (u32)mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) write_csr(dd, ASIC_CFG_MUTEX, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (user == mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return 0; /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (time_after(jiffies, timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) break; /* timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /* timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (try == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /* break mutex and retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) write_csr(dd, ASIC_CFG_MUTEX, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) try++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) void release_hw_mutex(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) u8 mask = 1 << dd->hfi1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (user != mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) dd_dev_warn(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) (u32)user, (u32)mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) write_csr(dd, ASIC_CFG_MUTEX, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /* return the given resource bit(s) as a mask for the given HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static inline u64 resource_mask(u32 hfi1_id, u32 resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
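
/*
 * Example: for a dynamic resource such as CR_SBUS, resource_mask(0, CR_SBUS)
 * is CR_SBUS itself, while resource_mask(1, CR_SBUS) is
 * CR_SBUS << CR_DYN_SHIFT, giving each HFI its own bit in ASIC_CFG_SCRATCH.
 * Non-dynamic resources keep the same bit for both HFIs, as
 * __acquire_chip_resource() below shows.
 */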
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) const char *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) "%s: hardware mutex stuck - suggest rebooting the machine\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * Acquire access to a chip resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) u64 scratch0, all_bits, my_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (resource & CR_DYN_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* a dynamic resource is in use if either HFI has set the bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) (resource & (CR_I2C1 | CR_I2C2))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* discrete devices must serialize across both chains */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) resource_mask(1, CR_I2C1 | CR_I2C2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) all_bits = resource_mask(0, resource) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) resource_mask(1, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) my_bit = resource_mask(dd->hfi1_id, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* non-dynamic resources are not split between HFIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) all_bits = resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) my_bit = resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /* lock against other callers within the driver wanting a resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) mutex_lock(&dd->asic_data->asic_resource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) ret = acquire_hw_mutex(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) fail_mutex_acquire_message(dd, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (scratch0 & all_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /* force write to be visible to other HFI on another OS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) (void)read_csr(dd, ASIC_CFG_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) release_hw_mutex(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) mutex_unlock(&dd->asic_data->asic_resource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * Acquire access to a chip resource, wait up to mswait milliseconds for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * the resource to become available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * acquire failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) timeout = jiffies + msecs_to_jiffies(mswait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) ret = __acquire_chip_resource(dd, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (ret != -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) /* resource is busy, check our timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (time_after_eq(jiffies, timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) usleep_range(80, 120); /* arbitrary delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
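
/*
 * Typical acquire/use/release sequence - a sketch following the same shape
 * as fabric_serdes_reset() above:
 *
 *	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
 *	if (ret)
 *		return ret;
 *	set_sbus_fast_mode(dd);
 *	... do SBus work ...
 *	clear_sbus_fast_mode(dd);
 *	release_chip_resource(dd, CR_SBUS);
 */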
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * Release access to a chip resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) u64 scratch0, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /* only dynamic resources should ever be cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (!(resource & CR_DYN_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) bit = resource_mask(dd->hfi1_id, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* lock against other callers within the driver wanting a resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) mutex_lock(&dd->asic_data->asic_resource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (acquire_hw_mutex(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) fail_mutex_acquire_message(dd, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if ((scratch0 & bit) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) scratch0 &= ~bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /* force write to be visible to other HFI on another OS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) (void)read_csr(dd, ASIC_CFG_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) __func__, dd->hfi1_id, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) release_hw_mutex(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) mutex_unlock(&dd->asic_data->asic_resource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)  * Return true if the resource is set, false otherwise. Print a warning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  * if it is not set and a function name is supplied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) const char *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) u64 scratch0, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
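	/*
	 * Dynamic resources are per-HFI, so build the bit from hfi1_id;
	 * values outside CR_DYN_MASK are already complete bit masks.
	 */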
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (resource & CR_DYN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) bit = resource_mask(dd->hfi1_id, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) bit = resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if ((scratch0 & bit) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) dd_dev_warn(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) "%s: id %d, resource 0x%x, not acquired!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) func, dd->hfi1_id, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) u64 scratch0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* lock against other callers within the driver wanting a resource */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) mutex_lock(&dd->asic_data->asic_resource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (acquire_hw_mutex(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) fail_mutex_acquire_message(dd, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /* clear all dynamic access bits for this HFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /* force write to be visible to other HFI on another OS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) (void)read_csr(dd, ASIC_CFG_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) release_hw_mutex(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) mutex_unlock(&dd->asic_data->asic_resource_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) void init_chip_resources(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /* clear any holds left by us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) clear_chip_resources(dd, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) void finish_chip_resources(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) /* clear any holds left by us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) clear_chip_resources(dd, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) void set_sbus_fast_mode(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) void clear_sbus_fast_mode(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) u64 reg, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
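	/*
	 * Before turning fast mode off, wait (bounded by SBUS_MAX_POLL_COUNT)
	 * until the EXECUTE count matches the RCV_DATA_VALID count, i.e. all
	 * issued SBus requests have produced their results.
	 */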
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) while (SBUS_COUNTER(reg, EXECUTE) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) SBUS_COUNTER(reg, RCV_DATA_VALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (count++ >= SBUS_MAX_POLL_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) int load_firmware(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (fw_fabric_serdes_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) set_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) fabric_serdes_broadcast[dd->hfi1_id],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) fabric_serdes_addrs[dd->hfi1_id],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) NUM_FABRIC_SERDES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) turn_off_spicos(dd, SPICO_FABRIC);
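		/* retry the load for as long as retry_firmware() asks for it */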
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) ret = load_fabric_serdes_firmware(dd, &fw_fabric);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) } while (retry_firmware(dd, ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) clear_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) release_chip_resource(dd, CR_SBUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (fw_8051_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) ret = load_8051_firmware(dd, &fw_8051);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) } while (retry_firmware(dd, ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) dump_fw_version(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) int hfi1_firmware_init(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /* only RTL can use these */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (dd->icode != ICODE_RTL_SILICON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) fw_fabric_serdes_load = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) fw_pcie_serdes_load = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) fw_sbus_load = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* no 8051 or QSFP on simulator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) fw_8051_load = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (!fw_8051_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (dd->icode == ICODE_RTL_SILICON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (!fw_fabric_serdes_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (!fw_sbus_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) fw_sbus_name = DEFAULT_FW_SBUS_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (!fw_pcie_serdes_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return obtain_firmware(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  * This is a helper for parse_platform_config() and does not check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)  * validity of the platform configuration cache (the cache is known to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)  * be invalid because it is still being built up).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)  * As such, it should not be called from anywhere other than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  * parse_platform_config().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (!system_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) meta_ver_meta =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) + SYSTEM_TABLE_META_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
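	/*
	 * The metadata word packs the field's start offset (in bits) in its
	 * low METADATA_TABLE_FIELD_START_LEN_BITS bits and the field length
	 * (in bits) above METADATA_TABLE_FIELD_LEN_SHIFT. Decode both, then
	 * read the version field itself out of the system table.
	 */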
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) ver_start = meta_ver_meta & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) ver_len = meta_ver_meta & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ver_start /= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (meta_ver < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		dd_dev_info(dd, "%s: Please update platform config\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 			    __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) int parse_platform_config(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) u32 *ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) int ret = -EINVAL; /* assume failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * For integrated devices that did not fall back to the default file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * the SI tuning information for active channels is acquired from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * scratch register bitmap, thus there is no platform config to parse.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * Skip parsing in these situations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (ppd->config_from_scratch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (!dd->platform_config.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) dd_dev_err(dd, "%s: Missing config file\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) ptr = (u32 *)dd->platform_config.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) magic_num = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) dd_dev_err(dd, "%s: Bad config file\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) /* Field is file size in DWORDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) file_length = (*ptr) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	 * The length cannot be larger than the partition size. If it is,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	 * assume the old platform config format (version 4): the file size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	 * field is then the first table header, so do not move the pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			    "%s: File length out of bounds, using alternative format\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (file_length > dd->platform_config.size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		dd_dev_info(dd, "%s: File claims to be larger than read size\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) } else if (file_length < dd->platform_config.size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 			    "%s: File claims to be smaller than read size, continuing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) /* exactly equal, perfection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	 * In both cases where we proceed, the self-reported file length is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	 * the safer bound to use. For the old format, the predefined size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	 * set above is used instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) */
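	/*
	 * Each record in the file consists of two header DWORDs (the second
	 * being the bitwise complement of the first, used for validation),
	 * followed by the table data and a trailing CRC DWORD over that data.
	 */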
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) header1 = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) header2 = *(ptr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (header1 != ~header2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) __func__, (ptr - (u32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) dd->platform_config.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) record_idx = *ptr &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) ((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) table_length_dwords = (*ptr >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) ((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* Done with this set of headers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) ptr += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (record_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /* data table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) switch (table_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) case PLATFORM_CONFIG_SYSTEM_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) pcfgcache->config_tables[table_type].num_table =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) ret = check_meta_version(dd, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) case PLATFORM_CONFIG_PORT_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) pcfgcache->config_tables[table_type].num_table =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) case PLATFORM_CONFIG_RX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) case PLATFORM_CONFIG_TX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) pcfgcache->config_tables[table_type].num_table =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) table_length_dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) "%s: Unknown data table %d, offset %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) __func__, table_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) (ptr - (u32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) dd->platform_config.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) goto bail; /* We don't trust this file now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) pcfgcache->config_tables[table_type].table = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /* metadata table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) switch (table_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) case PLATFORM_CONFIG_SYSTEM_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) case PLATFORM_CONFIG_PORT_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) case PLATFORM_CONFIG_RX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) case PLATFORM_CONFIG_TX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) dd_dev_err(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) "%s: Unknown meta table %d, offset %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) __func__, table_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) (ptr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) (u32 *)dd->platform_config.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) goto bail; /* We don't trust this file now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) pcfgcache->config_tables[table_type].table_metadata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /* Calculate and check table crc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) (table_length_dwords * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) crc ^= ~(u32)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* Jump the table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) ptr += table_length_dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (crc != *ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) __func__, (ptr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) (u32 *)dd->platform_config.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /* Jump the CRC DWORD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) pcfgcache->cache_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) memset(pcfgcache, 0, sizeof(struct platform_config_cache));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
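/*
 * For integrated platforms there is no parsed platform config file; the
 * values of interest are saved in hfi1_pportdata (see config_from_scratch).
 * Translate a (table, field) request into the corresponding saved value.
 * Fields not handled here are left untouched.
 */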
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) static void get_integrated_platform_config_field(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) enum platform_config_table_type_encoding table_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) int field_index, u32 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) u8 *cache = ppd->qsfp_info.cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) u32 tx_preset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) switch (table_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) case PLATFORM_CONFIG_SYSTEM_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) *data = ppd->max_power_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) *data = ppd->default_atten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) case PLATFORM_CONFIG_PORT_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (field_index == PORT_TABLE_PORT_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) *data = ppd->port_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) *data = ppd->local_atten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) *data = ppd->remote_atten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) case PLATFORM_CONFIG_RX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) *data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) QSFP_RX_CDR_APPLY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) *data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) QSFP_RX_EMP_APPLY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) *data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) QSFP_RX_AMP_APPLY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) *data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) QSFP_RX_CDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) *data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) QSFP_RX_EMP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) *data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) QSFP_RX_AMP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) case PLATFORM_CONFIG_TX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) tx_preset = ppd->tx_preset_eq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) tx_preset = ppd->tx_preset_noeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (field_index == TX_PRESET_TABLE_PRECUR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) *data = (tx_preset & TX_PRECUR_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) TX_PRECUR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) else if (field_index == TX_PRESET_TABLE_ATTN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) *data = (tx_preset & TX_ATTN_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) TX_ATTN_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) else if (field_index == TX_PRESET_TABLE_POSTCUR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) *data = (tx_preset & TX_POSTCUR_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) TX_POSTCUR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) *data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) QSFP_TX_CDR_APPLY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) *data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) QSFP_TX_EQ_APPLY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) *data = (tx_preset & QSFP_TX_CDR_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) QSFP_TX_CDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) *data = (tx_preset & QSFP_TX_EQ_SMASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) QSFP_TX_EQ_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
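/*
 * Look up the metadata word for @field in @table and decode the field's
 * start offset and length (both in bits). Returns -EINVAL if the cache is
 * not valid, the table is unknown, or the field index is out of range.
 */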
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) int field, u32 *field_len_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) u32 *field_start_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) u32 *src_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (!pcfgcache->cache_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) switch (table) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) case PLATFORM_CONFIG_SYSTEM_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) case PLATFORM_CONFIG_PORT_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) case PLATFORM_CONFIG_RX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) case PLATFORM_CONFIG_TX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (field && field < platform_config_table_limits[table])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) src_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) pcfgcache->config_tables[table].table_metadata + field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) dd_dev_info(dd, "%s: Unknown table\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) if (!src_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (field_start_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) *field_start_bits = *src_ptr &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (field_len_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) *field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) /* This is the central interface to getting data out of the platform config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) * file. It depends on parse_platform_config() having populated the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * validate the sanity of the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * The non-obvious parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)  * @table_index: look-up key selecting which instance of a table the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)  * relevant field is fetched from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)  * This applies to the data tables that have multiple instances. The port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)  * table is an exception to this rule as each HFI only has one port and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)  * thus the relevant table can be distinguished by hfi1_id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)  * @data: pointer to memory that will be populated with the requested field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)  * @len: length of the memory pointed to by @data, in bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) int get_platform_config_field(struct hfi1_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) enum platform_config_table_type_encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) table_type, int table_index, int field_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) u32 *data, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) int ret = 0, wlen = 0, seek = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) struct hfi1_pportdata *ppd = dd->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) memset(data, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (ppd->config_from_scratch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * Use saved configuration from ppd for integrated platforms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) get_integrated_platform_config_field(dd, table_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) field_index, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ret = get_platform_fw_field_metadata(dd, table_type, field_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) &field_len_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) &field_start_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /* Convert length to bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) len *= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* Our metadata function checked cache_valid and field_index for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) switch (table_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) case PLATFORM_CONFIG_SYSTEM_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) src_ptr = pcfgcache->config_tables[table_type].table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) if (len < field_len_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) seek = field_start_bits / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) wlen = field_len_bits / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) src_ptr = (u32 *)((u8 *)src_ptr + seek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 			 * If we are here, the field is expected to be byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 			 * aligned and a whole number of bytes long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) memcpy(data, src_ptr, wlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) case PLATFORM_CONFIG_PORT_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		/* Port table is 4 DWORDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) src_ptr = dd->hfi1_id ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) pcfgcache->config_tables[table_type].table + 4 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) pcfgcache->config_tables[table_type].table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) case PLATFORM_CONFIG_RX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) case PLATFORM_CONFIG_TX_PRESET_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) src_ptr = pcfgcache->config_tables[table_type].table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (table_index <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		    pcfgcache->config_tables[table_type].num_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) src_ptr += table_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) src_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) dd_dev_info(dd, "%s: Unknown table\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (!src_ptr || len < field_len_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
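	/*
	 * General case: the field fits within a single DWORD. As a worked
	 * example (hypothetical values), start = 40 bits and length = 8 bits
	 * select DWORD 1 of the table and extract its bits 8..15.
	 */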
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) src_ptr += (field_start_bits / 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) *data = (*src_ptr >> (field_start_bits % 32)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) ((1 << field_len_bits) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
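
/*
 * Illustrative use (hypothetical, not a specific call site), reading the
 * port type for this port:
 *
 *	u32 port_type;
 *
 *	if (!get_platform_config_field(dd, PLATFORM_CONFIG_PORT_TABLE, 0,
 *				       PORT_TABLE_PORT_TYPE, &port_type,
 *				       sizeof(port_type)))
 *		... use port_type ...
 */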
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * Download the firmware needed for the Gen3 PCIe SerDes. An update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * to the SBus firmware is needed before updating the PCIe firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * Note: caller must be holding the SBus resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) int load_pcie_firmware(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) /* both firmware loads below use the SBus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) set_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (fw_sbus_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) turn_off_spicos(dd, SPICO_SBUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ret = load_sbus_firmware(dd, &fw_sbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) } while (retry_firmware(dd, ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (fw_pcie_serdes_load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) pcie_serdes_broadcast[dd->hfi1_id],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) pcie_serdes_addrs[dd->hfi1_id],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) NUM_PCIE_SERDES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) ret = load_pcie_serdes_firmware(dd, &fw_pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) } while (retry_firmware(dd, ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) clear_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  * Read the GUID from the hardware and store it in dd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) void read_guid(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) /* Take the DC out of reset to get a valid GUID value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) write_csr(dd, CCE_DC_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) (void)read_csr(dd, CCE_DC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	dd_dev_info(dd, "GUID %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		    (unsigned long long)dd->base_guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) /* read and display firmware version info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) static void dump_fw_version(struct hfi1_devdata *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) u32 pcie_vers[NUM_PCIE_SERDES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) u32 fabric_vers[NUM_FABRIC_SERDES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) u32 sbus_vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) int all_same;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) u8 rcv_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) /* set fast mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) set_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
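	/*
	 * Each version query below follows the same pattern: issue an SBus
	 * request, give the resulting interrupt time to be processed, then
	 * read the result back with sbus_read().
	 */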
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) /* read version for SBus Master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /* wait for interrupt to be processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) usleep_range(10000, 11000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) /* read version for PCIe SerDes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) all_same = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) pcie_vers[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) for (i = 0; i < NUM_PCIE_SERDES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) /* wait for interrupt to be processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) usleep_range(10000, 11000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (i > 0 && pcie_vers[0] != pcie_vers[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) all_same = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (all_same) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) pcie_vers[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) for (i = 0; i < NUM_PCIE_SERDES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) "PCIe SerDes lane %d firmware version 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) i, pcie_vers[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) /* read version for fabric SerDes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) all_same = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) fabric_vers[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) for (i = 0; i < NUM_FABRIC_SERDES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /* wait for interrupt to be processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) usleep_range(10000, 11000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (i > 0 && fabric_vers[0] != fabric_vers[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) all_same = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (all_same) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) fabric_vers[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) for (i = 0; i < NUM_FABRIC_SERDES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) dd_dev_info(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) "Fabric SerDes lane %d firmware version 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) i, fabric_vers[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) clear_sbus_fast_mode(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) release_chip_resource(dd, CR_SBUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }