// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 */

#include <linux/ratelimit.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
{
	return readl((void __iomem *)(ha->nx_pcibase + addr));
}

void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
{
	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
}

static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
{
	uint32_t val;
	int ret_val = QLA_SUCCESS;

	qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
	val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
	if (val != addr) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
			   __func__, addr, val);
		ret_val = QLA_ERROR;
	}

	return ret_val;
}

int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t *data)
{
	int ret_val;

	ret_val = qla4_83xx_set_win_base(ha, addr);

	if (ret_val == QLA_SUCCESS) {
		*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
	} else {
		*data = 0xffffffff;
		ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
			   __func__, addr);
	}

	return ret_val;
}

int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t data)
{
	int ret_val;

	ret_val = qla4_83xx_set_win_base(ha, addr);

	if (ret_val == QLA_SUCCESS)
		qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
	else
		ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
			   __func__, addr, data);

	return ret_val;
}
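
/*
 * Editorial note: the two indirect accessors above tunnel CRB accesses
 * through a per-function window.  qla4_83xx_set_win_base() programs
 * QLA83XX_CRB_WIN_FUNC(func_num) with the target address and reads it
 * back to confirm the window took effect; the data itself then moves
 * through the QLA83XX_WILDCARD offset.  An illustrative caller (sketch
 * only, "reg" and "bit" are placeholders, not driver symbols):
 *
 *	uint32_t val;
 *
 *	if (qla4_83xx_rd_reg_indirect(ha, reg, &val) == QLA_ERROR)
 *		return QLA_ERROR;
 *	val |= bit;
 *	if (qla4_83xx_wr_reg_indirect(ha, reg, val) == QLA_ERROR)
 *		return QLA_ERROR;
 */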

static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
{
	int lock_owner;
	int timeout = 0;
	uint32_t lock_status = 0;
	int ret_val = QLA_SUCCESS;

	while (lock_status == 0) {
		lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
		if (lock_status)
			break;

		if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
			lock_owner = qla4_83xx_rd_reg(ha,
						      QLA83XX_FLASH_LOCK_ID);
			ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
				   __func__, ha->func_num, lock_owner);
			ret_val = QLA_ERROR;
			break;
		}
		msleep(20);
	}

	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
	return ret_val;
}

static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
{
	/* Reading FLASH_UNLOCK register unlocks the Flash */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
	qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
}
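
/*
 * Editorial note: QLA83XX_FLASH_LOCK/QLA83XX_FLASH_UNLOCK appear to act
 * as a read-to-acquire/read-to-release hardware semaphore - the lock is
 * taken once reading FLASH_LOCK returns non-zero, and reading
 * FLASH_UNLOCK drops it.  QLA83XX_FLASH_LOCK_ID is bookkeeping only: it
 * records which PCI function holds (or last tried to take) the lock,
 * which is why qla4_83xx_flash_lock() writes ha->func_num even on
 * timeout and qla4_83xx_flash_unlock() writes 0xFF before releasing.
 */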

int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
			     uint8_t *p_data, int u32_word_count)
{
	int i;
	uint32_t u32_word;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_flash_lock(ha);
	if (ret_val == QLA_ERROR)
		goto exit_lock_error;

	if (addr & 0x03) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_flash_read;
	}

	for (i = 0; i < u32_word_count; i++) {
		ret_val = qla4_83xx_wr_reg_indirect(ha,
						    QLA83XX_FLASH_DIRECT_WINDOW,
						    (addr & 0xFFFF0000));
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		ret_val = qla4_83xx_rd_reg_indirect(ha,
						    QLA83XX_FLASH_DIRECT_DATA(addr),
						    &u32_word);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		*(__le32 *)p_data = le32_to_cpu(u32_word);
		p_data = p_data + 4;
		addr = addr + 4;
	}

exit_flash_read:
	qla4_83xx_flash_unlock(ha);

exit_lock_error:
	return ret_val;
}
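
/*
 * Illustrative use of qla4_83xx_flash_read_u32() (editorial sketch, not
 * taken from the driver): flash_addr must be 4-byte aligned and the
 * count is in 32-bit words, not bytes.
 *
 *	uint32_t word;
 *
 *	if (qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
 *				     (uint8_t *)&word, 1) != QLA_SUCCESS)
 *		return QLA_ERROR;
 */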

int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
				      uint32_t flash_addr, uint8_t *p_data,
				      int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);

	if (addr & 0x3) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_lockless_read;
	}

	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
					    addr);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
			   __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {

		/* Multi sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;

			if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla4_83xx_wr_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_WINDOW,
						addr);
				if (ret_val == QLA_ERROR) {
					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
						   __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}

void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_83xx_flash_lock(ha))
		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);

	/*
	 * Either we got the lock, or someone else is holding it while we
	 * are resetting, so forcefully unlock it.
	 */
	qla4_83xx_flash_unlock(ha);
}

#define INTENT_TO_RECOVER	0x01
#define PROCEED_TO_RECOVER	0x02

static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
{
	uint32_t lock = 0, lockid;
	int ret_val = QLA_ERROR;

	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & 0x3) != 0)
		goto exit_lock_recovery;

	/* Intent to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | INTENT_TO_RECOVER);

	msleep(200);

	/* Check Intent to Recover is advertised */
	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
	if ((lockid & 0x3C) != (ha->func_num << 2))
		goto exit_lock_recovery;

	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
		   __func__, ha->func_num);

	/* Proceed to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | PROCEED_TO_RECOVER);

	/* Force Unlock */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);

	/* Get lock */
	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
	if (lock) {
		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
		ret_val = QLA_SUCCESS;
	}

exit_lock_recovery:
	return ret_val;
}
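
/*
 * Editorial note: as used above, QLA83XX_DRV_LOCKRECOVERY packs the
 * recovery handshake into its low bits - bits 0-1 hold the recovery
 * state (INTENT_TO_RECOVER/PROCEED_TO_RECOVER) and bits 2-5 hold the
 * number of the function performing recovery.  That is why the code
 * tests (lockid & 0x3) for a recovery already in progress and compares
 * (lockid & 0x3C) against (ha->func_num << 2).
 */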

#define QLA83XX_DRV_LOCK_MSLEEP	200

int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
{
	int timeout = 0;
	uint32_t status = 0;
	int ret_val = QLA_SUCCESS;
	uint32_t first_owner = 0;
	uint32_t tmo_owner = 0;
	uint32_t lock_id;
	uint32_t func_num;
	uint32_t lock_cnt;

	while (status == 0) {
		status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7)
			 * on getting a successful lock */
			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
			break;
		}

		if (timeout == 0)
			/* Save counter + ID of function holding the lock for
			 * first failure */
			first_owner = ha->isp_ops->rd_reg_direct(ha,
							QLA83XX_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
				   __func__, ha->func_num, func_num, lock_cnt,
				   (first_owner & 0xFF));

			if (first_owner != tmo_owner) {
				/* Some other driver got lock, OR same driver
				 * got lock again (counter value changed), when
				 * we were waiting for lock.
				 * Retry for another 2 sec */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
					   __func__, ha->func_num);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				ret_val = qla4_83xx_lock_recovery(ha);
				if (ret_val == QLA_SUCCESS) {
					/* Recovered and got lock */
					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
						   __func__, ha->func_num);
					break;
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs and retry */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
					   __func__, ha->func_num);
				timeout = 0;
			}
		}
		msleep(QLA83XX_DRV_LOCK_MSLEEP);
	}

	return ret_val;
}
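
/*
 * Editorial note: QLA83XX_DRV_LOCK_ID keeps the owning PCI function in
 * bits 0-7 and an acquisition counter in bits 8-31, hence the
 * ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num update on every
 * successful lock.  A typical (illustrative) IDC critical section pairs
 * the two helpers:
 *
 *	qla4_83xx_drv_lock(ha);
 *	... access IDC registers ...
 *	qla4_83xx_drv_unlock(ha);
 */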

void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
{
	int id;

	id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);

	if ((id & 0xFF) != ha->func_num) {
		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
			   __func__, ha->func_num, (id & 0xFF));
		return;
	}

	/* Keep the lock counter value, set the func_num field to 0xFF */
	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
}

void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
{
	uint32_t idc_ctrl;

	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	idc_ctrl |= DONTRESET_BIT0;
	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
			  idc_ctrl));
}

void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
{
	uint32_t idc_ctrl;

	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	idc_ctrl &= ~DONTRESET_BIT0;
	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
			  idc_ctrl));
}

int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
{
	uint32_t idc_ctrl;

	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	return idc_ctrl & DONTRESET_BIT0;
}

/*-------------------------IDC State Machine ---------------------*/

enum {
	UNKNOWN_CLASS = 0,
	NIC_CLASS,
	FCOE_CLASS,
	ISCSI_CLASS
};

struct device_info {
	int func_num;
	int device_type;
	int port_num;
};

int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
	uint32_t drv_active;
	uint32_t dev_part, dev_part1, dev_part2;
	int i;
	struct device_info device_map[16];
	int func_nibble;
	int nibble;
	int nic_present = 0;
	int iscsi_present = 0;
	int iscsi_func_low = 0;

	/* Use the dev_partition register to determine the PCI function number
	 * and then check drv_active register to see which driver is loaded */
	dev_part1 = qla4_83xx_rd_reg(ha,
				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);

	/* Each function has 4 bits in dev_partition Info register,
	 * Lower 2 bits - device type, Upper 2 bits - physical port number */
	dev_part = dev_part1;
	for (i = nibble = 0; i <= 15; i++, nibble++) {
		func_nibble = dev_part & (0xF << (nibble * 4));
		func_nibble >>= (nibble * 4);
		device_map[i].func_num = i;
		device_map[i].device_type = func_nibble & 0x3;
		device_map[i].port_num = func_nibble & 0xC;

		if (device_map[i].device_type == NIC_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				nic_present++;
				break;
			}
		} else if (device_map[i].device_type == ISCSI_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				if (!iscsi_present ||
				    (iscsi_present &&
				     (iscsi_func_low > device_map[i].func_num)))
					iscsi_func_low = device_map[i].func_num;

				iscsi_present++;
			}
		}

		/* For function_num[8..15] get info from dev_part2 register */
		if (nibble == 7) {
			nibble = 0;
			dev_part = dev_part2;
		}
	}

	/* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
	 * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
	 * present. */
	if (!nic_present && (ha->func_num == iscsi_func_low)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: can reset - NIC not present and lower iSCSI function is %d\n",
				  __func__, ha->func_num));
		return 1;
	}

	return 0;
}
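
/*
 * Worked example (editorial): each function owns one nibble of the
 * dev_partition info, low 2 bits = device class (1 = NIC, 3 = iSCSI per
 * the enum above), high 2 bits = physical port.  A dev_part1 value of
 * 0x00000031 therefore describes function 0 as NIC_CLASS and function 1
 * as ISCSI_CLASS; whether those drivers are actually loaded is then
 * taken from the matching bits of drv_active.
 */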

/**
 * qla4_83xx_need_reset_handler - Code to start reset sequence
 * @ha: pointer to adapter structure
 *
 * Note: IDC lock must be held upon entry
 **/
void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout, dev_init_timeout;

	ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
		   __func__);

	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
				  __func__));
		qla4_8xxx_set_rst_ready(ha);

		/* Non-reset owners ACK Reset and wait for device INIT state
		 * as part of Reset Recovery by Reset Owner */
		dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

		do {
			if (time_after_eq(jiffies, dev_init_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
					   __func__);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
	} else {
		qla4_8xxx_set_rst_ready(ha);
		reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
		drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

		ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
			   __func__, drv_state, drv_active);

		while (drv_state != drv_active) {
			if (time_after_eq(jiffies, reset_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
					   __func__, DRIVER_NAME, drv_state,
					   drv_active);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			drv_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_STATE);
			drv_active = qla4_8xxx_rd_direct(ha,
							 QLA8XXX_CRB_DRV_ACTIVE);
		}

		if (drv_state != drv_active) {
			ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
				   __func__, (drv_active ^ drv_state));
			drv_active = drv_active & drv_state;
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
					    drv_active);
		}

		clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
		/* Start Reset Recovery */
		qla4_8xxx_device_bootstrap(ha);
	}
}

void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
{
	uint32_t idc_params, ret_val;

	ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
					   (uint8_t *)&idc_params, 1);
	if (ret_val == QLA_SUCCESS) {
		ha->nx_dev_init_timeout = idc_params & 0xFFFF;
		ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
	} else {
		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
	}

	DEBUG2(ql4_printk(KERN_DEBUG, ha,
			  "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
			  __func__, ha->nx_dev_init_timeout,
			  ha->nx_reset_timeout));
}
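
/*
 * Editorial note: the single 32-bit word at QLA83XX_IDC_PARAM_ADDR packs
 * both IDC timeouts - device-init timeout in the low 16 bits and
 * reset-ack timeout in the high 16 bits, in seconds (they are multiplied
 * by HZ in qla4_83xx_need_reset_handler()).  ROM_DEV_INIT_TIMEOUT and
 * ROM_DRV_RESET_ACK_TIMEOUT are only fallbacks for when the flash read
 * fails.
 */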

/*-------------------------Reset Sequence Functions-----------------------*/

static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
{
	uint8_t *phdr;

	if (!ha->reset_tmplt.buff) {
		ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
			   __func__);
		return;
	}

	phdr = ha->reset_tmplt.buff;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
			  *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
			  *(phdr+5), *(phdr+6), *(phdr+7), *(phdr+8),
			  *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
			  *(phdr+13), *(phdr+14), *(phdr+15)));
}

static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
{
	uint8_t *p_cache;
	uint32_t src, count, size;
	uint64_t dest;
	int ret_val = QLA_SUCCESS;

	src = QLA83XX_BOOTLOADER_FLASH_ADDR;
	dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
	size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);

	/* 128 bit alignment check */
	if (size & 0xF)
		size = (size + 16) & ~0xF;

	/* 16 byte count */
	count = size / 16;

	p_cache = vmalloc(size);
	if (p_cache == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_copy_bootloader;
	}

	ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
						    size / sizeof(uint32_t));
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
			   __func__);
		goto exit_copy_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
			  __func__));

	/* 128 bit/16 byte write to MS memory */
	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
					      count);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
			   __func__);
		goto exit_copy_error;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
			  __func__, size));

exit_copy_error:
	vfree(p_cache);

exit_copy_bootloader:
	return ret_val;
}

static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
{
	uint32_t val, ret_val = QLA_ERROR;
	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;

	do {
		val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
		if (val == PHAN_INITIALIZE_COMPLETE) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: Command Peg initialization complete. State=0x%x\n",
					  __func__, val));
			ret_val = QLA_SUCCESS;
			break;
		}
		msleep(CRB_CMDPEG_CHECK_DELAY);
	} while (--retries);

	return ret_val;
}

/**
 * qla4_83xx_poll_reg - Poll the given CRB address for up to "duration"
 * msecs until the value read, ANDed with test_mask, equals test_result.
 *
 * @ha : Pointer to adapter structure
 * @addr : CRB register address
 * @duration : Total poll time in msecs
 * @test_mask : Mask to AND with the value read
 * @test_result : Expected value of (value & test_mask)
 **/
static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
			      int duration, uint32_t test_mask,
			      uint32_t test_result)
{
	uint32_t value;
	uint8_t retries;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
	if (ret_val == QLA_ERROR)
		goto exit_poll_reg;

	retries = duration / 10;
	do {
		if ((value & test_mask) != test_result) {
			msleep(duration / 10);
			ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
			if (ret_val == QLA_ERROR)
				goto exit_poll_reg;

			ret_val = QLA_ERROR;
		} else {
			ret_val = QLA_SUCCESS;
			break;
		}
	} while (retries--);

exit_poll_reg:
	if (ret_val == QLA_ERROR) {
		ha->reset_tmplt.seq_error++;
		ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
			   __func__, value, test_mask, test_result);
	}

	return ret_val;
}

static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
{
	uint32_t sum = 0;
	uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
	int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
	int ret_val;

	while (u16_count-- > 0)
		sum += *buff++;

	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);

	/* checksum of 0 indicates a valid template */
	if (~sum) {
		ret_val = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
			   __func__);
		ret_val = QLA_ERROR;
	}

	return ret_val;
}
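
/*
 * Worked example (editorial): the folding loop above reduces the 32-bit
 * running sum of 16-bit words back to 16 bits by adding the carry in
 * repeatedly, e.g. sum = 0x0001FFFE folds to 0x0001 + 0xFFFE = 0xFFFF,
 * and 0x00023FFF folds to 0x0002 + 0x3FFF = 0x4001.  This is the usual
 * end-around-carry step of a 16-bit checksum.
 */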
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * qla4_83xx_read_reset_template - Read Reset Template from Flash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * @ha: Pointer to adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) uint8_t *p_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) uint32_t ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) ha->reset_tmplt.seq_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (ha->reset_tmplt.buff == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) goto exit_read_reset_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) p_buff = ha->reset_tmplt.buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) addr = QLA83XX_RESET_TEMPLATE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) sizeof(uint32_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) "%s: Read template hdr size %d from Flash\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) __func__, tmplt_hdr_def_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* Copy template header from flash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) tmplt_hdr_def_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (ret_val != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) goto exit_read_template_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) ha->reset_tmplt.hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* Validate the template header size and signature */
	tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size / sizeof(uint32_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) __func__, tmplt_hdr_size, tmplt_hdr_def_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) goto exit_read_template_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
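	/* Reuse tmplt_hdr_def_size for the remaining template size in dwords */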
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) "%s: Read rest of the template size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) __func__, ha->reset_tmplt.hdr->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* Copy rest of the template */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) tmplt_hdr_def_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (ret_val != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto exit_read_template_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /* Integrity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (qla4_83xx_reset_seq_checksum_test(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) goto exit_read_template_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) __func__));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Get STOP, START, INIT sequence offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ha->reset_tmplt.hdr->init_seq_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ha->reset_tmplt.hdr->start_seq_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) ha->reset_tmplt.hdr->hdr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) qla4_83xx_dump_reset_seq_hdr(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) goto exit_read_reset_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) exit_read_template_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) vfree(ha->reset_tmplt.buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) exit_read_reset_template:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * @ha : Pointer to adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * @raddr : CRB address to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * @waddr : CRB address to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) uint32_t raddr, uint32_t waddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) uint32_t value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) qla4_83xx_rd_reg_indirect(ha, raddr, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) qla4_83xx_wr_reg_indirect(ha, waddr, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) *
 * This function reads the value from raddr, ANDs it with test_mask, shifts
 * it left/right, ORs/XORs it with values from the RMW header, and writes the
 * result to waddr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * @ha : Pointer to adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * @raddr : CRB address to read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * @waddr : CRB address to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * @p_rmw_hdr : header with shift/or/xor values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) uint32_t waddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct qla4_83xx_rmw *p_rmw_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) uint32_t value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (p_rmw_hdr->index_a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) qla4_83xx_rd_reg_indirect(ha, raddr, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) value &= p_rmw_hdr->test_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) value <<= p_rmw_hdr->shl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) value >>= p_rmw_hdr->shr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) value |= p_rmw_hdr->or_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) value ^= p_rmw_hdr->xor_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) qla4_83xx_wr_reg_indirect(ha, waddr, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
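/* WRITE_LIST opcode: write arg2 to the CRB register at arg1 for each entry,
 * with an optional delay in microseconds after each write. */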
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static void qla4_83xx_write_list(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct qla4_83xx_entry *p_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) uint32_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) p_entry = (struct qla4_83xx_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) for (i = 0; i < p_hdr->count; i++, p_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (p_hdr->delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) udelay((uint32_t)(p_hdr->delay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
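/* READ_WRITE_LIST opcode: copy the value of CRB register arg1 to CRB
 * register arg2 for each entry, with an optional delay after each copy. */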
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct qla4_83xx_entry *p_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) uint32_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) p_entry = (struct qla4_83xx_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) for (i = 0; i < p_hdr->count; i++, p_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (p_hdr->delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) udelay((uint32_t)(p_hdr->delay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
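/* POLL_LIST opcode: poll the CRB register arg1 of each entry against the
 * poll header's test_mask/test_value.  If a timeout is specified and a poll
 * fails, arg1 and arg2 are read back (the values are discarded). */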
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) long delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct qla4_83xx_entry *p_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct qla4_83xx_poll *p_poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) uint32_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) uint32_t value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) p_poll = (struct qla4_83xx_poll *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
	/* Entries start after the 8-byte qla4_83xx_poll header, which
	 * contains the test_mask and test_value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) sizeof(struct qla4_83xx_poll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) delay = (long)p_hdr->delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (!delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) for (i = 0; i < p_hdr->count; i++, p_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) p_poll->test_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) p_poll->test_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) for (i = 0; i < p_hdr->count; i++, p_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) p_poll->test_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) p_poll->test_value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
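/* POLL_WRITE_LIST opcode: for each entry write dr_value to dr_addr and
 * ar_value to ar_addr, then, if a timeout is specified, poll ar_addr
 * against the poll header's test_mask/test_value and log any timeout. */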
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) long delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct qla4_83xx_quad_entry *p_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct qla4_83xx_poll *p_poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) uint32_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) p_poll = (struct qla4_83xx_poll *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) p_entry = (struct qla4_83xx_quad_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ((char *)p_poll + sizeof(struct qla4_83xx_poll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) delay = (long)p_hdr->delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) for (i = 0; i < p_hdr->count; i++, p_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) p_entry->dr_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) p_entry->ar_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) p_poll->test_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) p_poll->test_value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) __func__, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) ha->reset_tmplt.seq_index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
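/* READ_MODIFY_WRITE opcode: for each entry, read CRB register arg1 (or a
 * previously saved value), apply the RMW header's mask/shift/or/xor and
 * write the result to CRB register arg2, with an optional delay. */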
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct qla4_83xx_entry *p_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct qla4_83xx_rmw *p_rmw_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) uint32_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) p_rmw_hdr = (struct qla4_83xx_rmw *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) p_entry = (struct qla4_83xx_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) for (i = 0; i < p_hdr->count; i++, p_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) p_rmw_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (p_hdr->delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) udelay((uint32_t)(p_hdr->delay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
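/* SEQ_PAUSE opcode: pause the reset sequence for 'delay' milliseconds. */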
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static void qla4_83xx_pause(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (p_hdr->delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) mdelay((uint32_t)((long)p_hdr->delay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
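/* POLL_READ_LIST opcode: for each entry write ar_value to ar_addr and, if a
 * timeout is specified, poll it; on success read dr_addr and save the value
 * in the reset template results array. */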
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) long delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct qla4_83xx_quad_entry *p_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct qla4_83xx_poll *p_poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) uint32_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) uint32_t value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) p_poll = (struct qla4_83xx_poll *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) p_entry = (struct qla4_83xx_quad_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ((char *)p_poll + sizeof(struct qla4_83xx_poll));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) delay = (long)p_hdr->delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) for (i = 0; i < p_hdr->count; i++, p_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) p_entry->ar_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) p_poll->test_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) p_poll->test_value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) __func__, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ha->reset_tmplt.seq_index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) index = ha->reset_tmplt.array_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ha->reset_tmplt.array[index++] = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ha->reset_tmplt.array_index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
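/* SEQ_END opcode: mark the end of the current sub-sequence. */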
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ha->reset_tmplt.seq_end = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
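/* TMPL_END opcode: mark the end of the template and report whether the
 * sequence completed cleanly or hit timeout errors. */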
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static void qla4_83xx_template_end(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct qla4_83xx_reset_entry_hdr *p_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ha->reset_tmplt.template_end = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (ha->reset_tmplt.seq_error == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) "%s: Reset sequence completed SUCCESSFULLY.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) __func__));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * qla4_83xx_process_reset_template - Process reset template.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) *
 * Process all entries in the reset template until an entry with the SEQ_END
 * opcode, which indicates the end of reset template processing. Each entry
 * has a reset entry header with the entry opcode/command, the size of the
 * entry, the number of entries in the sub-sequence, and a delay in
 * microseconds or a timeout in milliseconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * @ha : Pointer to adapter structure
 * @p_buff : Pointer to the first reset entry header of the sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) char *p_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int index, entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct qla4_83xx_reset_entry_hdr *p_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) char *p_entry = p_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) ha->reset_tmplt.seq_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ha->reset_tmplt.template_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) entries = ha->reset_tmplt.hdr->entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) index = ha->reset_tmplt.seq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) switch (p_hdr->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) case OPCODE_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case OPCODE_WRITE_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) qla4_83xx_write_list(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) case OPCODE_READ_WRITE_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) qla4_83xx_read_write_list(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) case OPCODE_POLL_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) qla4_83xx_poll_list(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) case OPCODE_POLL_WRITE_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) qla4_83xx_poll_write_list(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) case OPCODE_READ_MODIFY_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) qla4_83xx_read_modify_write(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) case OPCODE_SEQ_PAUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) qla4_83xx_pause(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) case OPCODE_SEQ_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) qla4_83xx_seq_end(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) case OPCODE_TMPL_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) qla4_83xx_template_end(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) case OPCODE_POLL_READ_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) qla4_83xx_poll_read_list(ha, p_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) __func__, p_hdr->cmd, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* Set pointer to next entry in the sequence. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) p_entry += p_hdr->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ha->reset_tmplt.seq_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
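/* Run the STOP sub-sequence of the reset template from the beginning. */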
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ha->reset_tmplt.seq_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ha->reset_tmplt.seq_end != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
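/* Run the START sub-sequence of the reset template. */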
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (ha->reset_tmplt.template_end != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
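/* Run the INIT sub-sequence of the reset template. */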
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (ha->reset_tmplt.seq_end != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
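/* Restart the firmware: run the STOP sequence, optionally collect a
 * minidump, run the INIT sequence, copy the bootloader and kick off the
 * START sequence. */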
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static int qla4_83xx_restart(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int ret_val = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) uint32_t idc_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) qla4_83xx_process_stop_seq(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
	/*
	 * Collect the minidump.
	 * If IDC_CTRL BIT1 (graceful reset) is set, clear it before going to
	 * the INIT state and don't collect the minidump.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (idc_ctrl & GRACEFUL_RESET_BIT1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) (idc_ctrl & ~GRACEFUL_RESET_BIT1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) qla4_8xxx_get_minidump(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) qla4_83xx_process_init_seq(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (qla4_83xx_copy_bootloader(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) ret_val = QLA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) goto exit_restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) qla4_83xx_process_start_seq(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) exit_restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
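/* Bring up the firmware: restart the adapter and verify that the command
 * PEG has initialized. */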
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) int ret_val = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) ret_val = qla4_83xx_restart(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (ret_val == QLA_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) goto exit_start_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) __func__));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) ret_val = qla4_83xx_check_cmd_peg_status(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (ret_val == QLA_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) exit_start_fw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /*----------------------Interrupt Related functions ---------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) qla4_8xxx_intr_disable(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) uint32_t mb_int, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ret = readl(&ha->qla4_83xx_reg->mbox_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) mb_int = ret & ~INT_ENABLE_FW_MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) writel(1, &ha->qla4_83xx_reg->leg_int_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) qla4_83xx_disable_mbox_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) qla4_83xx_disable_iocb_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) qla4_8xxx_intr_enable(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) uint32_t mb_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) mb_int = INT_ENABLE_FW_MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) writel(0, &ha->qla4_83xx_reg->leg_int_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) qla4_83xx_enable_mbox_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) qla4_83xx_enable_iocb_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) int incount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Load all mailbox registers, except mailbox 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) for (i = 1; i < incount; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
	/* Set the Host Interrupt register to 1 to tell the firmware that
	 * a mailbox command is pending. After reading the mailbox command,
	 * the firmware clears the host interrupt register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
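/* If a RISC interrupt is pending, hand it to the interrupt service routine
 * so the mailbox status registers are collected. */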
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) int intr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (intr_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) ha->mbox_status_count = outcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ha->isp_ops->interrupt_service_routine(ha, intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * @ha: pointer to host adapter structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) uint32_t dev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) ha->isp_ops->idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (ql4xdontresethba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) qla4_83xx_set_idc_dontreset(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (dev_state == QLA8XXX_DEV_READY) {
		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
		 * recovery. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) rval = QLA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) goto exit_isp_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) __func__));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) QLA8XXX_DEV_NEED_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) } else {
		/* If device_state is NEED_RESET, go ahead with the reset,
		 * irrespective of ql4xdontresethba. This allows a
		 * non-reset-owner to force a reset: the non-reset-owner sets
		 * IDC_CTRL BIT0 to prevent the reset-owner from doing a reset
		 * and then forces a reset by setting device_state to
		 * NEED_RESET. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) "%s: HW state already set to NEED_RESET\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) __func__));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
	/* For ISP8324 and ISP8042, the reset owner is NIC, iSCSI or FCoE,
	 * based on priority and which drivers are present. Unlike ISP8022,
	 * the function setting NEED_RESET may not be the reset owner. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (qla4_83xx_can_perform_reset(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) set_bit(AF_8XXX_RST_OWNER, &ha->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) ha->isp_ops->idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) rval = qla4_8xxx_device_state_handler(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ha->isp_ops->idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) qla4_8xxx_clear_rst_ready(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) exit_isp_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ha->isp_ops->idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (rval == QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) clear_bit(AF_FW_RECOVERY, &ha->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
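/* Dump the SRE-Shim control register and the per-port Rx buffer pause
 * threshold, max cell and traffic class statistics registers to the
 * debug log. */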
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) u32 val = 0, val1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* Port 0 Rx Buffer Pause Threshold Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) qla4_83xx_rd_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) DEBUG2(pr_info("0x%x ", val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) DEBUG2(pr_info("\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /* Port 1 Rx Buffer Pause Threshold Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) qla4_83xx_rd_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) DEBUG2(pr_info("0x%x ", val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) DEBUG2(pr_info("\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* Port 0 RxB Traffic Class Max Cell Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) qla4_83xx_rd_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) DEBUG2(pr_info("0x%x ", val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) DEBUG2(pr_info("\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Port 1 RxB Traffic Class Max Cell Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) qla4_83xx_rd_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) DEBUG2(pr_info("0x%x ", val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) DEBUG2(pr_info("\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* Port 0 RxB Rx Traffic Class Stats. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) for (i = 7; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) (val | (i << 29)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) DEBUG2(pr_info("0x%x ", val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) DEBUG2(pr_info("\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /* Port 1 RxB Rx Traffic Class Stats. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) for (i = 7; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) (val | (i << 29)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) DEBUG2(pr_info("0x%x ", val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) DEBUG2(pr_info("\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) DEBUG2(ql4_printk(KERN_INFO, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) val, val1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
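/* Program the SRE-Shim control, Rx buffer pause threshold and max cell
 * registers to the values that disable pause frames. */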
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /* set SRE-Shim Control Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) QLA83XX_SET_PAUSE_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* Port 0 Rx Buffer Pause Threshold Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) qla4_83xx_wr_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) QLA83XX_SET_PAUSE_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /* Port 1 Rx Buffer Pause Threshold Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) qla4_83xx_wr_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) QLA83XX_SET_PAUSE_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /* Port 0 RxB Traffic Class Max Cell Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) qla4_83xx_wr_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) QLA83XX_SET_TC_MAX_CELL_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /* Port 1 RxB Traffic Class Max Cell Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) qla4_83xx_wr_reg_indirect(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) QLA83XX_SET_TC_MAX_CELL_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) QLA83XX_SET_PAUSE_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) QLA83XX_SET_PAUSE_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * qla4_83xx_eport_init - Initialize EPort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * @ha: Pointer to host adapter structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) *
 * If the EPort hardware is in the reset state when pause is disabled, serious
 * hardware wedging issues can occur. To prevent this, perform EPort init every
 * time before disabling pause frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /* Clear the 8 registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /* Write any value to Reset Control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
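/* Take the EPort out of reset, dump the pause control registers and disable
 * pause frames, all under the IDC lock. */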
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ha->isp_ops->idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* Before disabling pause frames, ensure that eport is not in reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) qla4_83xx_eport_init(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) qla4_83xx_dump_pause_control_regs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) __qla4_83xx_disable_pause(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ha->isp_ops->idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * qla4_83xx_is_detached - Check if we are marked invisible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * @ha: Pointer to host adapter structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) int qla4_83xx_is_detached(struct scsi_qla_host *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) uint32_t drv_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (test_bit(AF_INIT_DONE, &ha->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) !(drv_active & (1 << ha->func_num))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) __func__, drv_active));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) return QLA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }