^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * QLogic Fibre Channel HBA Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (c) 2003-2014 QLogic Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include "qla_def.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/ktime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/ratelimit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/utsname.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) /* QLAFX00 specific Mailbox implementation functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * qlafx00_mailbox_command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Issue a mailbox command and wait for completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * mcp = driver internal mbx struct pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * Output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * 0 : QLA_SUCCESS (command completed successfully)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * 1 : QLA_FUNCTION_FAILED (error encountered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * Kernel context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) device_reg_t *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) uint8_t abort_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) uint8_t io_lock_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) uint16_t command = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) uint32_t *iptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) __le32 __iomem *optr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) uint32_t cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) uint32_t mboxes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) unsigned long wait_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) if (ha->pdev->error_state == pci_channel_io_perm_failure) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) ql_log(ql_log_warn, vha, 0x115c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) "PCI channel failed permanently, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) return QLA_FUNCTION_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) if (vha->device_flags & DFLG_DEV_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) ql_log(ql_log_warn, vha, 0x115f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) "Device in failed state, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) return QLA_FUNCTION_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) reg = ha->iobase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) io_lock_on = base_vha->flags.init_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) rval = QLA_SUCCESS;
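/*
 * Note whether an ISP abort is in progress; this determines below whether
 * the mailbox completion is awaited via interrupt or by polling.
 */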
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (ha->flags.pci_channel_io_perm_failure) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) ql_log(ql_log_warn, vha, 0x1175,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) "Perm failure on EEH timeout MBX, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) return QLA_FUNCTION_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (ha->flags.isp82xx_fw_hung) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) /* Setting Link-Down error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) mcp->mb[0] = MBS_LINK_DOWN_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) ql_log(ql_log_warn, vha, 0x1176,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) goto premature_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * Wait for any active mailbox command to finish, waiting at most tov
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * seconds. This serializes the actual issuing of mailbox commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * outside of ISP-abort handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /* Timeout occurred. Return error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) ql_log(ql_log_warn, vha, 0x1177,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) "Cmd access timeout, cmd=0x%x, Exiting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) mcp->mb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) return QLA_FUNCTION_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) ha->flags.mbox_busy = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /* Save mailbox command for debug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) ha->mcp32 = mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) ql_dbg(ql_dbg_mbx, vha, 0x1178,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /* Load mailbox registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) optr = &reg->ispfx00.mailbox0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) iptr = mcp->mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) command = mcp->mb[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) mboxes = mcp->out_mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
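/*
 * Write each outgoing mailbox register whose bit is set in out_mb; the mask
 * is shifted right one bit per register as optr/iptr walk the register file.
 */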
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) for (cnt = 0; cnt < ha->mbx_count; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (mboxes & BIT_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) wrt_reg_dword(optr, *iptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) mboxes >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) optr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) iptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /* Issue set host interrupt command to send cmd out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) ha->flags.mbox_int = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) (uint8_t *)mcp->mb, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) ((uint8_t *)mcp->mb + 0x10), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) ((uint8_t *)mcp->mb + 0x20), 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) /* Unlock mbx registers and wait for interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) ql_dbg(ql_dbg_mbx, vha, 0x1179,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) "Going to unlock irq & waiting for interrupts. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) "jiffies=%lx.\n", jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /* Wait for mbx cmd completion until timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) mcp->tov * HZ) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) ql_dbg(ql_dbg_mbx, vha, 0x112c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) "Cmd=%x Polling Mode.\n", command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
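/*
 * Polled completion: service the response queue by hand until the interrupt
 * handler sets mbox_int or the timeout expires.
 */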
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) while (!ha->flags.mbox_int) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) if (time_after(jiffies, wait_time))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) /* Check for pending interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) qla2x00_poll(ha->rsp_q_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) if (!ha->flags.mbox_int &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) !(IS_QLA2200(ha) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) command == MBC_LOAD_RISC_RAM_EXTENDED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) usleep_range(10000, 11000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) } /* while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) ql_dbg(ql_dbg_mbx, vha, 0x112d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) "Waited %d sec.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /* Check whether we timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) if (ha->flags.mbox_int) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) uint32_t *iptr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) ql_dbg(ql_dbg_mbx, vha, 0x112e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) "Cmd=%x completed.\n", command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) /* Got interrupt. Clear the flag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) ha->flags.mbox_int = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) /* Load return mailbox registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) iptr2 = mcp->mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) iptr = (uint32_t *)&ha->mailbox_out32[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) mboxes = mcp->in_mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) for (cnt = 0; cnt < ha->mbx_count; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (mboxes & BIT_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) *iptr2 = *iptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) mboxes >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) iptr2++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) iptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) rval = QLA_FUNCTION_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) ha->flags.mbox_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /* Clean up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) ha->mcp32 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) ql_dbg(ql_dbg_mbx, vha, 0x113a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) "checking for additional resp interrupt.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /* polling mode for non isp_abort commands. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) qla2x00_poll(ha->rsp_q_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
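/*
 * The command timed out and was not MBC_GEN_SYSTEM_ERROR (presumably
 * expected not to complete normally): recover via an ISP abort, either
 * scheduled for the DPC thread or invoked directly when already running
 * in DPC context.
 */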
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) if (rval == QLA_FUNCTION_TIMEOUT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) ha->flags.eeh_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /* not in dpc. schedule it for dpc to take over. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) ql_dbg(ql_dbg_mbx, vha, 0x115d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) "Timeout, schedule isp_abort_needed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) ql_log(ql_log_info, base_vha, 0x115e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) "Mailbox cmd timeout occurred, cmd=0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) "abort.\n", command, mcp->mb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) ha->flags.eeh_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) } else if (!abort_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /* call abort directly since we are in the DPC thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) ql_dbg(ql_dbg_mbx, vha, 0x1160,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) "Timeout, calling abort_isp.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) ql_log(ql_log_info, base_vha, 0x1161,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) "Mailbox cmd timeout occurred, cmd=0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) "mb[0]=0x%x. Scheduling ISP abort.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) command, mcp->mb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) if (ha->isp_ops->abort_isp(vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /* Failed. retry later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) set_bit(ISP_ABORT_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) ql_dbg(ql_dbg_mbx, vha, 0x1162,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) "Finished abort_isp.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) premature_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /* Allow next mbx cmd to come in. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) complete(&ha->mbx_cmd_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) ql_log(ql_log_warn, base_vha, 0x1163,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * qlafx00_driver_shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * Indicate a driver shutdown to firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * local function return status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * Kernel context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) struct mbx_cmd_32 mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) struct mbx_cmd_32 *mcp = &mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) "Entered %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) mcp->out_mb = MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) mcp->in_mb = MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) if (tmo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) mcp->tov = tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) mcp->tov = MBX_TOV_SECONDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) mcp->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) rval = qlafx00_mailbox_command(vha, mcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) ql_dbg(ql_dbg_mbx, vha, 0x1167,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) "Failed=%x.\n", rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) "Done %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * qlafx00_get_firmware_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * Get adapter firmware state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * TARGET_QUEUE_LOCK must be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) * ADAPTER_STATE_LOCK must be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * qlafx00 local function return status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * Kernel context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct mbx_cmd_32 mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct mbx_cmd_32 *mcp = &mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) "Entered %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) mcp->out_mb = MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) mcp->in_mb = MBX_1|MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) mcp->tov = MBX_TOV_SECONDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) mcp->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) rval = qlafx00_mailbox_command(vha, mcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /* Return firmware states. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) states[0] = mcp->mb[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) ql_dbg(ql_dbg_mbx, vha, 0x116a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) "Done %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * qlafx00_init_firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * Initialize adapter firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * dptr = Initialization control block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * size = size of initialization control block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * TARGET_QUEUE_LOCK must be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * ADAPTER_STATE_LOCK must be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * qlafx00 local function return status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * Kernel context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) struct mbx_cmd_32 mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) struct mbx_cmd_32 *mcp = &mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) "Entered %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
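/*
 * mb[1] is simply cleared; mb[2]/mb[3] carry the MSD/LSD (upper/lower
 * 32 bits) of the init control block DMA address.
 */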
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) mcp->mb[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) mcp->mb[2] = MSD(ha->init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) mcp->mb[3] = LSD(ha->init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) mcp->in_mb = MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) mcp->buf_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) mcp->flags = MBX_DMA_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) mcp->tov = MBX_TOV_SECONDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) rval = qlafx00_mailbox_command(vha, mcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) ql_dbg(ql_dbg_mbx, vha, 0x116d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) "Done %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * qlafx00_mbx_reg_test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) struct mbx_cmd_32 mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) struct mbx_cmd_32 *mcp = &mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) "Entered %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) mcp->mb[1] = 0xAAAA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) mcp->mb[2] = 0x5555;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) mcp->mb[3] = 0xAA55;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) mcp->mb[4] = 0x55AA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) mcp->mb[5] = 0xA5A5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) mcp->mb[6] = 0x5A5A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) mcp->mb[7] = 0x2525;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) mcp->mb[8] = 0xBBBB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) mcp->mb[9] = 0x6666;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) mcp->mb[10] = 0xBB66;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) mcp->mb[11] = 0x66BB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) mcp->mb[12] = 0xB6B6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) mcp->mb[13] = 0x6B6B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) mcp->mb[14] = 0x3636;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) mcp->mb[15] = 0xCCCC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) mcp->buf_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) mcp->flags = MBX_DMA_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) mcp->tov = MBX_TOV_SECONDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) rval = qlafx00_mailbox_command(vha, mcp);
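/* Verify that the firmware echoed the test pattern back in mb[17]-mb[31]. */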
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (rval == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) mcp->mb[31] != 0xCCCC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) ql_dbg(ql_dbg_mbx, vha, 0x1170,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) "Done %s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * @vha: HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) * Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) qlafx00_pci_config(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) uint16_t w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) pci_set_master(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) pci_try_set_mwi(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) w &= ~PCI_COMMAND_INTX_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) pci_write_config_word(ha->pdev, PCI_COMMAND, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /* PCIe -- adjust Maximum Read Request Size (2048). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (pci_is_pcie(ha->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) pcie_set_readrq(ha->pdev, 2048);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) ha->chip_revision = ha->pdev->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * @vha: HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) int i, core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) uint32_t cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) uint32_t reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /* stop the XOR DMA engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) /* stop the IDMA engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) reg_val &= ~(1<<12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) reg_val &= ~(1<<12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) reg_val &= ~(1<<12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) reg_val &= ~(1<<12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
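/*
 * Poll two SoC status registers (presumably the engines' busy indications)
 * until they clear, waiting up to 100000 * 100us (~10 seconds), before
 * putting the cores into reset.
 */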
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) for (i = 0; i < 100000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) /* Set all 4 cores in reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) QLAFX00_SET_HBA_SOC_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) QLAFX00_SET_HBA_SOC_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /* Reset all units in Fabric */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /* Clear the core Memory Power Down registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) for (i = 0; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) QLAFX00_SET_HBA_SOC_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /* Reset all interrupt control registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) for (i = 0; i < 115; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) QLAFX00_SET_HBA_SOC_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /* Reset the timer control registers, per core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) for (core = 0; core < 4; core++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) QLAFX00_SET_HBA_SOC_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) /* Reset per core IRQ ack register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) for (core = 0; core < 4; core++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) QLAFX00_SET_HBA_SOC_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) /* Set Fabric control and config to defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /* Kick in Fabric units */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /* Kick in Core0 to start boot process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /* Wait 10 seconds for the soft-reset to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) for (cnt = 10; cnt; cnt--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) * qlafx00_soft_reset() - Soft Reset ISPFx00.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * @vha: HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) qlafx00_soft_reset(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) int rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) if (unlikely(pci_channel_offline(ha->pdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) ha->flags.pci_channel_io_perm_failure))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) ha->isp_ops->disable_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) qlafx00_soc_cpu_reset(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * @vha: HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) qlafx00_chip_diag(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) int rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct req_que *req = ha->req_q_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) rval = qlafx00_mbx_reg_test(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) ql_log(ql_log_warn, vha, 0x1165,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) "Mailbox register test failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /* Flag a successful rval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
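/* Reset the request/response queue in/out pointers to their initial state. */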
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) qlafx00_config_rings(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) wrt_reg_dword(&reg->req_q_in, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) wrt_reg_dword(&reg->req_q_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) wrt_reg_dword(&reg->rsp_q_in, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) wrt_reg_dword(&reg->rsp_q_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* PCI posting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) rd_reg_dword(&reg->rsp_q_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (pci_is_pcie(ha->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) strlcpy(str, "PCIe iSA", str_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) snprintf(str, size, "%s", ha->mr.fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) qlafx00_enable_intrs(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) ha->interrupts_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) QLAFX00_ENABLE_ICNTRL_REG(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) qlafx00_disable_intrs(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) ha->interrupts_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) QLAFX00_DISABLE_ICNTRL_REG(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) qlafx00_iospace_config(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (pci_request_selected_regions(ha->pdev, ha->bars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) QLA2XXX_DRIVER_NAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* Use MMIO operations for all accesses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) "region #0 not an MMIO resource (%s), aborting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) "Invalid PCI mem BAR0 region size (%s), aborting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
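/*
 * BAR0 is mapped as cregbase (control register window); BAR2 is mapped
 * below as iobase, which holds the ISPFx00 register file and the
 * request/response queue area (see qlafx00_config_queues()).
 */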
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ha->cregbase =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (!ha->cregbase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) "region #2 not an MMIO resource (%s), aborting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) "Invalid PCI mem BAR2 region size (%s), aborting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ha->iobase =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!ha->iobase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /* Determine queue resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) ha->max_req_queues = ha->max_rsp_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) ql_log_pci(ql_log_info, ha->pdev, 0x012c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ha->bars, ha->cregbase, ha->iobase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) iospace_error_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct req_que *req = ha->req_q_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct rsp_que *rsp = ha->rsp_q_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
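/*
 * Preserve the driver-allocated ring parameters before
 * qlafx00_config_queues() repoints them at the BAR2 queue windows.
 */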
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) req->length_fx00 = req->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) req->ring_fx00 = req->ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) req->dma_fx00 = req->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rsp->length_fx00 = rsp->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) rsp->ring_fx00 = rsp->ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) rsp->dma_fx00 = rsp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ql_dbg(ql_dbg_init, vha, 0x012d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) req->length_fx00, (u64)req->dma_fx00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ql_dbg(ql_dbg_init, vha, 0x012e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) rsp->length_fx00, (u64)rsp->dma_fx00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) qlafx00_config_queues(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct req_que *req = ha->req_q_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct rsp_que *rsp = ha->rsp_q_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
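/*
 * The ISPFx00 request/response rings live in the adapter's BAR2 MMIO
 * region rather than in host memory: both the CPU-visible ring pointer
 * and the bus (DMA) address are derived from the queue offsets that
 * firmware reported in the AEN mailboxes.
 */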
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) req->length = ha->req_que_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) req->ring = (void __force *)ha->iobase + ha->req_que_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) req->dma = bar2_hdl + ha->req_que_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if ((!req->ring) || (req->length == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) ql_log_pci(ql_log_info, ha->pdev, 0x012f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) "Unable to allocate memory for req_ring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ql_dbg(ql_dbg_init, vha, 0x0130,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) "req: %p req_ring pointer %p req len 0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) "req off 0x%x, req->dma: 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) req, req->ring, req->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ha->req_que_off, (u64)req->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) rsp->length = ha->rsp_que_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) rsp->dma = bar2_hdl + ha->rsp_que_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if ((!rsp->ring) || (rsp->length == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) ql_log_pci(ql_log_info, ha->pdev, 0x0131,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) "Unable to allocate memory for rsp_ring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) ql_dbg(ql_dbg_init, vha, 0x0132,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) "rsp: %p rsp_ring pointer %p rsp len 0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) "rsp off 0x%x, rsp->dma: 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) rsp, rsp->ring, rsp->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ha->rsp_que_off, (u64)rsp->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) qlafx00_init_fw_ready(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) unsigned long wtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) uint16_t wait_time; /* Wait time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) uint32_t aenmbx, aenmbx7 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) uint32_t pseudo_aen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) uint32_t state[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* 30 seconds wait - Adjust if required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) wait_time = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
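/*
 * If firmware left a pseudo AEN pending, pick up the interrupt vector
 * codes it published in the init shadow register and request an orderly
 * driver shutdown, falling back to a soft reset if that fails, before
 * polling for firmware-ready below.
 */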
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) pseudo_aen = rd_reg_dword(&reg->pseudoaen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (pseudo_aen == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) aenmbx7 = rd_reg_dword(&reg->initval7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ha->mbx_intr_code = MSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ha->rqstq_intr_code = LSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) rval = qlafx00_driver_shutdown(vha, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (rval != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) qlafx00_soft_reset(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* wait time before firmware ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) wtime = jiffies + (wait_time * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) aenmbx = rd_reg_dword(&reg->aenmailbox0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ql_dbg(ql_dbg_mbx, vha, 0x0133,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) "aenmbx: 0x%x\n", aenmbx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) switch (aenmbx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) case MBA_FW_NOT_STARTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) case MBA_FW_STARTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) case MBA_SYSTEM_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) case MBA_REQ_TRANSFER_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) case MBA_RSP_TRANSFER_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) case MBA_FW_INIT_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) qlafx00_soft_reset(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) case MBA_FW_RESTART_CMPLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* Set the mbx and rqstq intr code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ha->mbx_intr_code = MSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ha->rqstq_intr_code = LSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) wrt_reg_dword(&reg->aenmailbox0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) rd_reg_dword_relaxed(&reg->aenmailbox0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ql_dbg(ql_dbg_init, vha, 0x0134,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) "f/w returned mbx_intr_code: 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) "rqstq_intr_code: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ha->mbx_intr_code, ha->rqstq_intr_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* The fw is apparently not ready. In order to continue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * we might need to issue Mbox cmd, but the problem is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * that the DoorBell vector values that come with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * 8060 AEN are most likely gone by now (and thus no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * bell would be rung on the fw side when mbox cmd is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * issued). We have to therefore grab the 8060 AEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * shadow regs (filled in by FW when the last 8060
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * AEN was being posted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Do the following to determine what is needed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * order to get the FW ready:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * 1. reload the 8060 AEN values from the shadow regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * 2. clear int status to get rid of possible pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * 3. issue Get FW State Mbox cmd to determine fw state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * Set the mbx and rqstq intr code from Shadow Regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) aenmbx7 = rd_reg_dword(&reg->initval7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ha->mbx_intr_code = MSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ha->rqstq_intr_code = LSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ha->req_que_off = rd_reg_dword(&reg->initval1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ha->rsp_que_off = rd_reg_dword(&reg->initval3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ha->req_que_len = rd_reg_dword(&reg->initval5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ha->rsp_que_len = rd_reg_dword(&reg->initval6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ql_dbg(ql_dbg_init, vha, 0x0135,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) "f/w returned mbx_intr_code: 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) "rqstq_intr_code: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ha->mbx_intr_code, ha->rqstq_intr_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* Get the FW state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) rval = qlafx00_get_firmware_state(vha, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* Retry if timer has not expired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* Firmware is waiting to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * initialized by driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* Issue driver shutdown and wait until f/w recovers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * Driver should continue to poll until 8060 AEN is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * received indicating firmware recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ql_dbg(ql_dbg_init, vha, 0x0136,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) "Sending Driver shutdown fw_state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) state[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) rval = qlafx00_driver_shutdown(vha, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) wtime = jiffies + (wait_time * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (time_after_eq(jiffies, wtime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ql_dbg(ql_dbg_init, vha, 0x0137,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) "Init f/w failed: aen[7]: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) rd_reg_dword(&reg->aenmailbox7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* Delay for a while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) } while (!done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ql_dbg(ql_dbg_init, vha, 0x0138,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) "%s **** FAILED ****.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ql_dbg(ql_dbg_init, vha, 0x0139,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) "%s **** SUCCESS ****.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * qlafx00_fw_ready() - Waits for firmware ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * @vha: HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) qlafx00_fw_ready(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) unsigned long wtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) uint16_t wait_time; /* Wait time if loop is coming ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) uint32_t state[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) wait_time = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* wait time before firmware ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) wtime = jiffies + (wait_time * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Wait for ISP to finish init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!vha->flags.init_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ql_dbg(ql_dbg_init, vha, 0x013a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) "Waiting for init to complete...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) rval = qlafx00_get_firmware_state(vha, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (rval == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (state[0] == FSTATE_FX00_INITIALIZED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ql_dbg(ql_dbg_init, vha, 0x013b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) "fw_state=%x\n", state[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (time_after_eq(jiffies, wtime))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* Delay for a while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ql_dbg(ql_dbg_init, vha, 0x013c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) "fw_state=%x curr time=%lx.\n", state[0], jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ql_dbg(ql_dbg_init, vha, 0x013d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) "Firmware ready **** FAILED ****.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ql_dbg(ql_dbg_init, vha, 0x013e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) "Firmware ready **** SUCCESS ****.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) qlafx00_find_all_targets(scsi_qla_host_t *vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct list_head *new_fcports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) uint16_t tgt_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) fc_port_t *fcport, *new_fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) int found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if ((atomic_read(&vha->loop_down_timer) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) STATE_TRANSITION(vha))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) atomic_set(&vha->loop_down_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) "Listing Target bit map...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ha->gid_list, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* Allocate temporary rmtport for any new rmtports discovered. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (new_fcport == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return QLA_MEMORY_ALLOC_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
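/*
 * Walk the target-node bit map retrieved by the preceding
 * FXDISC_GET_TGT_NODE_LIST request; every set bit is a target ID whose
 * node information must be fetched and reconciled with the current
 * fcport list.
 */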
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) for_each_set_bit(tgt_id, (void *)ha->gid_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) QLAFX00_TGT_NODE_LIST_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* Send get target node info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) new_fcport->tgt_id = tgt_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) rval = qlafx00_fx_disc(vha, new_fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) FXDISC_GET_TGT_NODE_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ql_log(ql_log_warn, vha, 0x208a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) "Target info scan failed -- assuming zero-entry "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) "result...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /* Locate matching device in database. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) list_for_each_entry(fcport, &vha->vp_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (memcmp(new_fcport->port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) fcport->port_name, WWN_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * If tgt_id is same and state FCS_ONLINE, nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (fcport->tgt_id == new_fcport->tgt_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) atomic_read(&fcport->state) == FCS_ONLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * Tgt ID changed or device was marked to be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) "TGT-ID Change(%s): Present tgt id: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) "0x%x state: 0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) "wwnn = %llx wwpn = %llx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) __func__, fcport->tgt_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) atomic_read(&fcport->state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) (unsigned long long)wwn_to_u64(fcport->node_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) (unsigned long long)wwn_to_u64(fcport->port_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ql_log(ql_log_info, vha, 0x208c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) "TGT-ID Announce(%s): Discovered tgt "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) "id 0x%x wwnn = %llx "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) wwn_to_u64(new_fcport->node_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) wwn_to_u64(new_fcport->port_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (atomic_read(&fcport->state) != FCS_ONLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) fcport->old_tgt_id = fcport->tgt_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) fcport->tgt_id = new_fcport->tgt_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ql_log(ql_log_info, vha, 0x208d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) "TGT-ID: New fcport Added: %p\n", fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) qla2x00_update_fcport(vha, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ql_log(ql_log_info, vha, 0x208e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) "Existing TGT-ID %x did not get "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) "offline event from firmware.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) fcport->old_tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) qla2x00_mark_device_lost(vha, fcport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) qla2x00_free_fcport(new_fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /* If device was not in our fcports list, then add it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) list_add_tail(&new_fcport->list, new_fcports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /* Allocate a new replacement fcport. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (new_fcport == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return QLA_MEMORY_ALLOC_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) qla2x00_free_fcport(new_fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * qlafx00_configure_all_targets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * Setup target devices with node ID's.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * 0 = success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * BIT_0 = error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) qlafx00_configure_all_targets(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) fc_port_t *fcport, *rmptemp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) LIST_HEAD(new_fcports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) FXDISC_GET_TGT_NODE_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) rval = qlafx00_find_all_targets(vha, &new_fcports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * Delete all previous devices marked lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) list_for_each_entry(fcport, &vha->vp_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (fcport->port_type != FCT_INITIATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) qla2x00_mark_device_lost(vha, fcport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * Add the new devices to our devices list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) qla2x00_update_fcport(vha, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) list_move_tail(&fcport->list, &vha->vp_fcports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) ql_log(ql_log_info, vha, 0x208f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) "Attach new target id 0x%x wwnn = %llx "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) "wwpn = %llx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) fcport->tgt_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) (unsigned long long)wwn_to_u64(fcport->node_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) (unsigned long long)wwn_to_u64(fcport->port_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /* Free all new device structures not processed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) list_del(&fcport->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) qla2x00_free_fcport(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * qlafx00_configure_devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * Updates Fibre Channel Device Database with what is actually on loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * 0 = success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * 1 = error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * 2 = database was full and device was not configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) qlafx00_configure_devices(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) flags = vha->dpc_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) ql_dbg(ql_dbg_disc, vha, 0x2090,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) "Configure devices -- dpc flags =0x%lx\n", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) rval = qlafx00_configure_all_targets(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (rval == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) atomic_set(&vha->loop_state, LOOP_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) ql_log(ql_log_info, vha, 0x2091,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) "Device Ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ql_dbg(ql_dbg_disc, vha, 0x2092,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) "%s *** FAILED ***.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) ql_dbg(ql_dbg_disc, vha, 0x2093,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) "%s: exiting normally.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) fc_port_t *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) vha->flags.online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ha->mr.fw_hbt_en = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!critemp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ha->flags.chip_reset_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) vha->qla_stats.total_isp_aborts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ql_log(ql_log_info, vha, 0x013f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) "Performing ISP error recovery - ha = %p.\n", ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) ha->isp_ops->reset_chip(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) atomic_set(&vha->loop_state, LOOP_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) atomic_set(&vha->loop_down_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) QLAFX00_LOOP_DOWN_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (!atomic_read(&vha->loop_down_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) atomic_set(&vha->loop_down_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) QLAFX00_LOOP_DOWN_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /* Clear all async request states across all VPs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) list_for_each_entry(fcport, &vha->vp_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) fcport->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (atomic_read(&fcport->state) == FCS_ONLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (!ha->flags.eeh_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (critemp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* Requeue all commands in outstanding command list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) qla2x00_abort_all_cmds(vha, DID_RESET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) qla2x00_free_irqs(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (critemp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* Clear the Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ql_log(ql_log_info, vha, 0x0140,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) "%s Done - ha=%p.\n", __func__, ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * qlafx00_init_response_q_entries() - Initializes response queue entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * @rsp: response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Beginning of request ring has initialization control block already built
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * by nvram config routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) qlafx00_init_response_q_entries(struct rsp_que *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) uint16_t cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) response_t *pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) rsp->ring_ptr = rsp->ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) rsp->ring_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) rsp->status_srb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) pkt = rsp->ring_ptr;
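/*
 * The FX00 response ring is mapped from BAR2 MMIO space (see
 * qlafx00_config_queues()), so each entry's signature is also stamped
 * via an MMIO write; RESPONSE_PROCESSED marks the slot as holding no
 * new completion.
 */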
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) for (cnt = 0; cnt < rsp->length; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) pkt->signature = RESPONSE_PROCESSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) wrt_reg_dword((void __force __iomem *)&pkt->signature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) RESPONSE_PROCESSED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) pkt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) qlafx00_rescan_isp(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) uint32_t status = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) uint32_t aenmbx7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
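/*
 * After a firmware restart the interrupt vector codes and the queue
 * offsets/lengths are republished through the AEN mailboxes; re-read
 * them before re-initializing the rings.
 */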
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ha->mbx_intr_code = MSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ha->rqstq_intr_code = LSW(aenmbx7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ql_dbg(ql_dbg_disc, vha, 0x2094,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) "Req que offset 0x%x Rsp que len 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ha->mbx_intr_code, ha->rqstq_intr_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) ha->req_que_off, ha->rsp_que_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* Clear the Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) status = qla2x00_init_rings(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) vha->flags.online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* if no cable then assume it's good */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if ((vha->device_flags & DFLG_NO_CABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* Register system information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (qlafx00_fx_disc(vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) ql_dbg(ql_dbg_disc, vha, 0x2095,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) "failed to register host info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) scsi_unblock_requests(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) qlafx00_timer_routine(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) uint32_t fw_heart_beat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) uint32_t aenmbx0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) uint32_t tempc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* Check firmware health */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (ha->mr.fw_hbt_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ha->mr.fw_hbt_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if ((!ha->flags.mr_reset_hdlr_active) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) (!test_bit(UNLOADING, &vha->dpc_flags)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) (ha->mr.fw_hbt_en)) {
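/*
 * Sample the firmware heartbeat counter; if it stops advancing for
 * QLAFX00_HEARTBEAT_MISS_CNT consecutive intervals, schedule an ISP
 * abort to recover the adapter.
 */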
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ha->mr.old_fw_hbt_cnt = fw_heart_beat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ha->mr.fw_hbt_miss_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) ha->mr.fw_hbt_miss_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (ha->mr.fw_hbt_miss_cnt ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) QLAFX00_HEARTBEAT_MISS_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) set_bit(ISP_ABORT_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) ha->mr.fw_hbt_miss_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /* Reset recovery to be performed in timer routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (ha->mr.fw_reset_timer_exp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ha->mr.fw_reset_timer_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /* Wake up DPC to rescan the targets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) } else if ((aenmbx0 == MBA_FW_STARTING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) (!ha->mr.fw_hbt_en)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) ha->mr.fw_hbt_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) } else if (!ha->mr.fw_reset_timer_tick) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (aenmbx0 == ha->mr.old_aenmbx0_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) ha->mr.fw_reset_timer_exp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) } else if (aenmbx0 == 0xFFFFFFFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) uint32_t data0, data1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
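/*
 * Reading all ones from the AEN mailbox suggests the PCIe register
 * window has been moved; reprogram the PEX0 WIN0 base address using
 * the upper bits of the BAR1 base address register while keeping the
 * window's low-order bits.
 */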
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) data0 = QLAFX00_RD_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) QLAFX00_BAR1_BASE_ADDR_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) data1 = QLAFX00_RD_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) data0 &= 0xffff0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) data1 &= 0x0000ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) QLAFX00_WR_REG(ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) (data0 | data1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) ha->mr.fw_reset_timer_tick =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) QLAFX00_MAX_RESET_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) } else if (aenmbx0 == MBA_FW_RESET_FCT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) ha->mr.fw_reset_timer_tick =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) QLAFX00_MAX_RESET_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (ha->mr.old_aenmbx0_state != aenmbx0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) ha->mr.old_aenmbx0_state = aenmbx0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) ha->mr.fw_reset_timer_tick--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * Critical temperature recovery to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * performed in timer routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (ha->mr.fw_critemp_timer_tick == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) tempc = QLAFX00_GET_TEMPERATURE(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) ql_dbg(ql_dbg_timer, vha, 0x6012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) "ISPFx00(%s): Critical temp timer, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) "current SOC temperature: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) __func__, tempc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (tempc < ha->mr.critical_temperature) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) clear_bit(FX00_CRITEMP_RECOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ha->mr.fw_critemp_timer_tick =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) QLAFX00_CRITEMP_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ha->mr.fw_critemp_timer_tick--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (ha->mr.host_info_resend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * Incomplete host info might be sent to firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * during system boot - info should be resent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (ha->mr.hinfo_resend_timer_tick == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ha->mr.host_info_resend = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ha->mr.hinfo_resend_timer_tick =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) QLAFX00_HINFO_RESEND_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) ha->mr.hinfo_resend_timer_tick--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * qlafx00_reset_initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * Re-initialize after an iSA device reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * 0 = success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) qlafx00_reset_initialize(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (vha->device_flags & DFLG_DEV_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) ql_dbg(ql_dbg_init, vha, 0x0142,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) "Device in failed state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) ha->flags.mr_reset_hdlr_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (vha->flags.online) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) scsi_block_requests(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) qlafx00_abort_isp_cleanup(vha, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ql_log(ql_log_info, vha, 0x0143,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) "(%s): succeeded.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ha->flags.mr_reset_hdlr_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * qlafx00_abort_isp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * Resets ISP and aborts all outstanding commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * 0 = success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) qlafx00_abort_isp(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (vha->flags.online) {
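		/*
		 * If the PCI channel is permanently gone there is nothing
		 * left to recover; clear the retry flag and bail out.
		 */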
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (unlikely(pci_channel_offline(ha->pdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) ha->flags.pci_channel_io_perm_failure)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) scsi_block_requests(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) qlafx00_abort_isp_cleanup(vha, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) scsi_block_requests(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) vha->qla_stats.total_isp_aborts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) ha->isp_ops->reset_chip(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /* Clear the Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) ql_log(ql_log_info, vha, 0x0145,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) "(%s): succeeded.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) static inline fc_port_t*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) fc_port_t *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /* Check for matching device in remote port list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) list_for_each_entry(fcport, &vha->vp_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (fcport->tgt_id == tgt_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) ql_dbg(ql_dbg_async, vha, 0x5072,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) "Matching fcport(%p) found with TGT-ID: 0x%x "
			    "and Remote TGT-ID: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) fcport, fcport->tgt_id, tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) fc_port_t *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) ql_log(ql_log_info, vha, 0x5073,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) "Detach TGT-ID: 0x%x\n", tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) fcport = qlafx00_get_fcport(vha, tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (!fcport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
	qla2x00_mark_device_lost(vha, fcport, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) uint32_t aen_code, aen_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) aen_code = FCH_EVT_VENDOR_UNIQUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) aen_data = evt->u.aenfx.evtcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) switch (evt->u.aenfx.evtcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
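		/*
		 * As handled below: mbx[1] selects the scope (0 = single
		 * target, 0xffff = all targets); mbx[2] == 1 reports the
		 * target(s) present, mbx[2] == 2 reports them gone (mbx[3]
		 * carries the TGT-ID in the single-target case).
		 */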
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (evt->u.aenfx.mbx[1] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (evt->u.aenfx.mbx[2] == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (!vha->flags.fw_tgt_reported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) vha->flags.fw_tgt_reported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) atomic_set(&vha->loop_down_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) atomic_set(&vha->loop_state, LOOP_UP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) } else if (evt->u.aenfx.mbx[2] == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) } else if (evt->u.aenfx.mbx[1] == 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (evt->u.aenfx.mbx[2] == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (!vha->flags.fw_tgt_reported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) vha->flags.fw_tgt_reported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) } else if (evt->u.aenfx.mbx[2] == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) vha->device_flags |= DFLG_NO_CABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) qla2x00_mark_all_devices_lost(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) case QLAFX00_MBA_LINK_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) aen_code = FCH_EVT_LINKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) aen_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) case QLAFX00_MBA_LINK_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) aen_code = FCH_EVT_LINKDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) aen_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ql_log(ql_log_info, vha, 0x5082,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) "Process critical temperature event "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) "aenmb[0]: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) evt->u.aenfx.evtcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) scsi_block_requests(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) qlafx00_abort_isp_cleanup(vha, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) scsi_unblock_requests(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) fc_host_post_event(vha->host, fc_get_event_number(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) aen_code, aen_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
	u64 port_name, node_name;

	port_name = wwn_to_u64(pinfo->port_name);
	node_name = wwn_to_u64(pinfo->node_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) fc_host_node_name(vha->host) = node_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) fc_host_port_name(vha->host) = port_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (!pinfo->port_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) vha->hw->current_topology = ISP_CFG_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) atomic_set(&vha->loop_state, LOOP_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) atomic_set(&vha->loop_state, LOOP_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
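/*
 * IOCB timeout handler for FXDISC requests: just signal the completion so
 * the thread waiting in qlafx00_fx_disc() can unwind.
 */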
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) qla2x00_fxdisc_iocb_timeout(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) srb_t *sp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct srb_iocb *lio = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) complete(&lio->u.fxiocb.fxiocb_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) static void qla2x00_fxdisc_sp_done(srb_t *sp, int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct srb_iocb *lio = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) complete(&lio->u.fxiocb.fxiocb_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
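/*
 * qlafx00_fx_disc
 *	Issue an FXDISC (driver IOCB) request and wait for its completion.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fcport = port used for per-port queries (port_id/tgt_id).
 *	fx_type = FXDISC_* request type.
 *
 * Returns:
 *	QLA_SUCCESS or a driver/firmware failure code.
 *
 * Illustrative call (the fcport argument shown is only an example):
 *	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
 *	    FXDISC_GET_CONFIG_INFO);
 */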
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) struct srb_iocb *fdisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) int rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct host_system_info *phost_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct register_host_info *preg_hsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct new_utsname *p_sysid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (!sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) sp->type = SRB_FXIOCB_DCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) sp->name = "fxdisc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) fdisc = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) qla2x00_init_timer(sp, FXDISC_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) switch (fx_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) case FXDISC_GET_CONFIG_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) fdisc->u.fxiocb.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) SRB_FXDISC_RESP_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) case FXDISC_GET_PORT_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) fdisc->u.fxiocb.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) case FXDISC_GET_TGT_NODE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) fdisc->u.fxiocb.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) case FXDISC_GET_TGT_NODE_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) fdisc->u.fxiocb.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case FXDISC_REG_HOST_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) p_sysid = utsname();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (!p_sysid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) ql_log(ql_log_warn, vha, 0x303c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) "Not able to get the system information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) goto done_free_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) case FXDISC_ABORT_IOCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) fdisc->u.fxiocb.req_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (!fdisc->u.fxiocb.req_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) goto done_free_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
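		/*
		 * For host registration, fill the request buffer with the
		 * utsname() details so the firmware can identify this host.
		 */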
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (fx_type == FXDISC_REG_HOST_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) preg_hsi = (struct register_host_info *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) fdisc->u.fxiocb.req_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) phost_info = &preg_hsi->hsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) memset(preg_hsi, 0, sizeof(struct register_host_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) phost_info->os_type = OS_TYPE_LINUX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) strlcpy(phost_info->sysname, p_sysid->sysname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) sizeof(phost_info->sysname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) strlcpy(phost_info->nodename, p_sysid->nodename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) sizeof(phost_info->nodename));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (!strcmp(phost_info->nodename, "(none)"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) ha->mr.host_info_resend = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) strlcpy(phost_info->release, p_sysid->release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) sizeof(phost_info->release));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) strlcpy(phost_info->version, p_sysid->version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) sizeof(phost_info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) strlcpy(phost_info->machine, p_sysid->machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) sizeof(phost_info->machine));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) strlcpy(phost_info->domainname, p_sysid->domainname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) sizeof(phost_info->domainname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) sizeof(phost_info->hostdriver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ql_dbg(ql_dbg_init, vha, 0x0149,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) "ISP%04X: Host registration with firmware\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) ha->pdev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) ql_dbg(ql_dbg_init, vha, 0x014a,
			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) phost_info->os_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) phost_info->sysname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) phost_info->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) ql_dbg(ql_dbg_init, vha, 0x014b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) "release = '%s', version = '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) phost_info->release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) phost_info->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) ql_dbg(ql_dbg_init, vha, 0x014c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) "machine = '%s' "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) "domainname = '%s', hostdriver = '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) phost_info->machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) phost_info->domainname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) phost_info->hostdriver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) phost_info, sizeof(*phost_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) fdisc->u.fxiocb.rsp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (!fdisc->u.fxiocb.rsp_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto done_unmap_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sp->done = qla2x00_fxdisc_sp_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) rval = qla2x00_start_sp(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (rval != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) goto done_unmap_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
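	/* Decode the response buffer according to the request type issued. */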
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (fx_type == FXDISC_GET_CONFIG_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) struct config_info_data *pinfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) strlcpy(vha->hw->model_number, pinfo->model_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) ARRAY_SIZE(vha->hw->model_number));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) strlcpy(vha->hw->model_desc, pinfo->model_description,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) ARRAY_SIZE(vha->hw->model_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) sizeof(vha->hw->mr.symbolic_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) sizeof(vha->hw->mr.serial_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) sizeof(vha->hw->mr.hw_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) sizeof(vha->hw->mr.fw_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) strim(vha->hw->mr.fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) sizeof(vha->hw->mr.uboot_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) sizeof(vha->hw->mr.fru_serial_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) vha->hw->mr.critical_temperature =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) (pinfo->nominal_temp_value) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) QLAFX00_EXTENDED_IO_EN_MASK) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) } else if (fx_type == FXDISC_GET_PORT_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) struct port_info_data *pinfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) vha->d_id.b.domain = pinfo->port_id[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) vha->d_id.b.area = pinfo->port_id[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) vha->d_id.b.al_pa = pinfo->port_id[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) qlafx00_update_host_attr(vha, pinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) pinfo, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct qlafx00_tgt_node_info *pinfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) fcport->port_type = FCT_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) pinfo, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) struct qlafx00_tgt_node_info *pinfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) pinfo, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
	} else if (fx_type == FXDISC_ABORT_IOCTL) {
		fdisc->u.fxiocb.result =
		    (fdisc->u.fxiocb.result ==
			cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
		    cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) rval = le32_to_cpu(fdisc->u.fxiocb.result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) done_unmap_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (fdisc->u.fxiocb.rsp_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) done_unmap_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (fdisc->u.fxiocb.req_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) done_free_sp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) sp->free(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * qlafx00_initialize_adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * Initialize board.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * 0 = success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) qlafx00_initialize_adapter(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) uint32_t tempc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /* Clear adapter flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) vha->flags.online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) ha->flags.chip_reset_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) vha->flags.reset_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) ha->flags.pci_channel_io_perm_failure = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) ha->flags.eeh_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) atomic_set(&vha->loop_state, LOOP_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) vha->device_flags = DFLG_NO_CABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) vha->dpc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) vha->flags.management_server_logged_in = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ha->isp_abort_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) ha->beacon_blink_led = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) set_bit(0, ha->req_qid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) set_bit(0, ha->rsp_qid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) ql_dbg(ql_dbg_init, vha, 0x0147,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) "Configuring PCI space...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) rval = ha->isp_ops->pci_config(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) ql_log(ql_log_warn, vha, 0x0148,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) "Unable to configure PCI space.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) rval = qlafx00_init_fw_ready(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (rval != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) qlafx00_save_queue_ptrs(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) rval = qlafx00_config_queues(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (rval != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * Allocate the array of outstanding commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) * now that we know the firmware resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (rval != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) rval = qla2x00_init_rings(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) ha->flags.chip_reset_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) tempc = QLAFX00_GET_TEMPERATURE(ha);
	ql_dbg(ql_dbg_init, vha, 0x0152,
	    "ISPFx00(%s): Current SOC temperature: 0x%x\n",
	    __func__, tempc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
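/*
 * qlafx00_fw_state_show
 *	Return the raw firmware state word; the caller (the common fw_state
 *	sysfs handler) is expected to format it into @buf, which is unused
 *	here.
 */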
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) uint32_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) int rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) uint32_t state[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
	if (qla2x00_reset_active(vha)) {
		ql_log(ql_log_warn, vha, 0x70ce,
		    "ISP reset active.\n");
	} else if (!vha->hw->flags.eeh_busy) {
		rval = qlafx00_get_firmware_state(vha, state);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) if (rval != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) memset(state, -1, sizeof(state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) return state[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) qlafx00_get_host_speed(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) struct qla_hw_data *ha = ((struct scsi_qla_host *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) (shost_priv(shost)))->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) u32 speed = FC_PORTSPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) switch (ha->link_data_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) case QLAFX00_PORT_SPEED_2G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) speed = FC_PORTSPEED_2GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) case QLAFX00_PORT_SPEED_4G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) speed = FC_PORTSPEED_4GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) case QLAFX00_PORT_SPEED_8G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) speed = FC_PORTSPEED_8GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) case QLAFX00_PORT_SPEED_10G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) speed = FC_PORTSPEED_10GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) fc_host_speed(shost) = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
/* QLAFX00 specific ISR implementation functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) uint32_t sense_len, struct rsp_que *rsp, int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) struct scsi_qla_host *vha = sp->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) struct scsi_cmnd *cp = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) uint32_t track_sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) SET_FW_SENSE_LEN(sp, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (sense_len >= SCSI_SENSE_BUFFERSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) sense_len = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) SET_CMD_SENSE_LEN(sp, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) track_sense_len = sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (sense_len > par_sense_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) sense_len = par_sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) memcpy(cp->sense_buffer, sense_data, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) track_sense_len -= sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) SET_CMD_SENSE_LEN(sp, track_sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) ql_dbg(ql_dbg_io, vha, 0x304d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) sense_len, par_sense_len, track_sense_len);
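	/*
	 * If the firmware reported more sense data than fits in this status
	 * packet, park the srb on the response queue so the remainder can be
	 * collected from the status continuation entries that follow.
	 */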
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (GET_FW_SENSE_LEN(sp) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) rsp->status_srb = sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) cp->result = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (sense_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) sp->vha->host_no, cp->device->id, cp->device->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) cp->sense_buffer, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) __le16 sstatus, __le16 cpstatus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) struct srb_iocb *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) tmf = &sp->u.iocb_cmd;
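	/*
	 * Normalize the completion status: anything other than a clean
	 * CS_COMPLETE (or a completion carrying response info) is reported
	 * back to the waiter as CS_INCOMPLETE.
	 */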
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) tmf->u.tmf.comp_status = cpstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) sp->done(sp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) struct abort_iocb_entry_fx00 *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) const char func[] = "ABT_IOCB";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) struct srb_iocb *abt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (!sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) abt = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) abt->u.abt.comp_status = pkt->tgt_id_sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) sp->done(sp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) struct ioctl_iocb_entry_fx00 *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) const char func[] = "IOSB_IOCB";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct bsg_job *bsg_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) struct fc_bsg_reply *bsg_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) struct srb_iocb *iocb_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) struct qla_mt_iocb_rsp_fx00 fstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) uint8_t *fw_sts_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (!sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (sp->type == SRB_FXIOCB_DCMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) iocb_job = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) iocb_job->u.fxiocb.seq_number = pkt->seq_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) iocb_job->u.fxiocb.result = pkt->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) iocb_job->u.fxiocb.req_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) pkt->dataword_r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) bsg_job = sp->u.bsg_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) bsg_reply = bsg_job->reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) fstatus.reserved_1 = pkt->reserved_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) fstatus.func_type = pkt->comp_func_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) fstatus.ioctl_flags = pkt->fw_iotcl_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) fstatus.ioctl_data = pkt->dataword_r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) fstatus.adapid = pkt->adapid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) fstatus.reserved_2 = pkt->dataword_r_extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) fstatus.res_count = pkt->residuallen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) fstatus.status = pkt->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) fstatus.seq_number = pkt->seq_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) memcpy(fstatus.reserved_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) pkt->reserved_2, 20 * sizeof(uint8_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
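		/*
		 * Append the firmware IOCB status immediately after the
		 * generic fc_bsg_reply so the BSG requester can retrieve it
		 * from the reply buffer.
		 */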
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) sp->vha, 0x5080, pkt, sizeof(*pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) sp->vha, 0x5074,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) fw_sts_ptr, sizeof(fstatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) res = bsg_reply->result = DID_OK << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) bsg_reply->reply_payload_rcv_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) bsg_job->reply_payload.payload_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) sp->done(sp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * qlafx00_status_entry() - Process a Status IOCB entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * @vha: SCSI driver HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * @rsp: response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * @pkt: Entry pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) fc_port_t *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) struct scsi_cmnd *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) struct sts_entry_fx00 *sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) __le16 comp_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) __le16 scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) __le16 lscsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) int32_t resid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) fw_resid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) uint8_t *rsp_info = NULL, *sense_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) uint32_t hindex, handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) uint16_t que;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) struct req_que *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) int logit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) sts = (struct sts_entry_fx00 *) pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) comp_status = sts->comp_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
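	/*
	 * The completion handle packs the request queue number in the upper
	 * 16 bits and the outstanding-command index in the lower 16 bits.
	 */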
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) hindex = sts->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) handle = LSW(hindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) que = MSW(hindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) req = ha->req_q_map[que];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /* Validate handle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (handle < req->num_outstanding_cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) sp = req->outstanding_cmds[handle];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) sp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (sp == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) ql_dbg(ql_dbg_io, vha, 0x3034,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) "Invalid status handle (0x%x).\n", handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (sp->type == SRB_TM_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) req->outstanding_cmds[handle] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) qlafx00_tm_iocb_entry(vha, req, pkt, sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) scsi_status, comp_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) /* Fast path completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) if (comp_status == CS_COMPLETE && scsi_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) qla2x00_process_completed_request(vha, req, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) req->outstanding_cmds[handle] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) cp = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (cp == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) ql_dbg(ql_dbg_io, vha, 0x3048,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) "Command already returned (0x%x/%p).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) handle, sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) fcport = sp->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) sense_len = par_sense_len = rsp_info_len = resid_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) fw_resid_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) sense_len = sts->sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) | (uint16_t)SS_RESIDUAL_OVER)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) resid_len = le32_to_cpu(sts->residual_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) fw_resid_len = le32_to_cpu(sts->residual_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) rsp_info = sense_data = sts->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) par_sense_len = sizeof(sts->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) /* Check for overrun. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (comp_status == CS_COMPLETE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
	/*
	 * Generate the Linux status code based on the host and SCSI status.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) switch (le16_to_cpu(comp_status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) case CS_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) case CS_QUEUE_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (scsi_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) res = DID_OK << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) | (uint16_t)SS_RESIDUAL_OVER))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) resid = resid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) scsi_set_resid(cp, resid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (!lscsi_status &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ((unsigned)(scsi_bufflen(cp) - resid) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) cp->underflow)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) "Mid-layer underflow "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) "detected (0x%x of 0x%x bytes).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) resid, scsi_bufflen(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) res = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) res = DID_OK << 16 | le16_to_cpu(lscsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (lscsi_status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) "QUEUE FULL detected.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) logit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) rsp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) case CS_DATA_UNDERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /* Use F/W calculated residual length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) resid = fw_resid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) resid = resid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) scsi_set_resid(cp, resid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) && fw_resid_len != resid_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) "Dropped frame(s) detected "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) "(0x%x of 0x%x bytes).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) resid, scsi_bufflen(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) res = DID_ERROR << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) le16_to_cpu(lscsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) goto check_scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (!lscsi_status &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) ((unsigned)(scsi_bufflen(cp) - resid) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) cp->underflow)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) "Mid-layer underflow "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) "detected (0x%x of 0x%x bytes, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) "cp->underflow: 0x%x).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) resid, scsi_bufflen(cp), cp->underflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) res = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) } else if (lscsi_status !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) * A SCSI status of TASK SET FULL or BUSY means the task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) * did not complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) "Dropped frame(s) detected (0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) "of 0x%x bytes).\n", resid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) scsi_bufflen(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) goto check_scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) "scsi_status: 0x%x, lscsi_status: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) scsi_status, lscsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) res = DID_OK << 16 | le16_to_cpu(lscsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) logit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) check_scsi_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * Check to see if the SCSI status is non-zero. If so, report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * the SCSI status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (lscsi_status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (lscsi_status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) "QUEUE FULL detected.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) logit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (lscsi_status !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) if (!(scsi_status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) qlafx00_handle_sense(sp, sense_data, par_sense_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) sense_len, rsp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) case CS_PORT_LOGGED_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) case CS_PORT_CONFIG_CHG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) case CS_PORT_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) case CS_INCOMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) case CS_PORT_UNAVAILABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) case CS_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) case CS_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * We are going to have the fc class block the rport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) * while we try to recover so instruct the mid layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * to requeue until the class decides how to handle this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) res = DID_TRANSPORT_DISRUPTED << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) "Port down status: port-state=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) atomic_read(&fcport->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (atomic_read(&fcport->state) == FCS_ONLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) qla2x00_mark_device_lost(fcport->vha, fcport, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) case CS_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) res = DID_RESET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) res = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (logit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) "par_sense_len=0x%x, rsp_info_len=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) comp_status, scsi_status, res, vha->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) cp->device->id, cp->device->lun, fcport->tgt_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) lscsi_status, cp->cmnd, scsi_bufflen(cp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) rsp_info, resid_len, fw_resid_len, sense_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) par_sense_len, rsp_info_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
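/*
 * If sense data spilled over into status continuation entries,
 * rsp->status_srb is set and completion is deferred until
 * qlafx00_status_cont_entry() has consumed the remaining data.
 */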
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (rsp->status_srb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) sp->done(sp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) WARN_ON_ONCE(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * qlafx00_status_cont_entry() - Process a Status Continuation entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) * @rsp: response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) * @pkt: Entry pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) * Extended sense data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) uint8_t sense_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct qla_hw_data *ha = rsp->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) srb_t *sp = rsp->status_srb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) struct scsi_cmnd *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) uint32_t sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) uint8_t *sense_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (!sp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) ql_dbg(ql_dbg_io, vha, 0x3037,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) "no SP, sp = %p\n", sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (!GET_FW_SENSE_LEN(sp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) ql_dbg(ql_dbg_io, vha, 0x304b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) "no fw sense data, sp = %p\n", sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) cp = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (cp == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) ql_log(ql_log_warn, vha, 0x303b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) "cmd is NULL: already returned to OS (sp=%p).\n", sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) rsp->status_srb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (!GET_CMD_SENSE_LEN(sp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) ql_dbg(ql_dbg_io, vha, 0x304c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) "no sense data, sp = %p\n", sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) sense_len = GET_CMD_SENSE_LEN(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) sense_ptr = GET_CMD_SENSE_PTR(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) ql_dbg(ql_dbg_io, vha, 0x304f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) "sp=%p sense_len=0x%x sense_ptr=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) sp, sense_len, sense_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
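/* Each continuation entry carries at most sizeof(pkt->data) bytes of
 * sense data, so clamp the copy length accordingly. */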
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (sense_len > sizeof(pkt->data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) sense_sz = sizeof(pkt->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) sense_sz = sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* Move sense data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) pkt, sizeof(*pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) memcpy(sense_ptr, pkt->data, sense_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) sense_ptr, sense_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) sense_len -= sense_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) sense_ptr += sense_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) SET_CMD_SENSE_PTR(sp, sense_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) SET_CMD_SENSE_LEN(sp, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
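/* Deduct one packet's worth of data from the outstanding firmware sense length. */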
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) sense_len = GET_FW_SENSE_LEN(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) sense_len = (sense_len > sizeof(pkt->data)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) (sense_len - sizeof(pkt->data)) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) SET_FW_SENSE_LEN(sp, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /* Place command on done queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (sense_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) rsp->status_srb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) sp->done(sp, cp->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) WARN_ON_ONCE(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * qlafx00_multistatus_entry() - Process a multi-status response queue entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) * @vha: SCSI driver HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) * @rsp: response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) * @pkt: received packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) qlafx00_multistatus_entry(struct scsi_qla_host *vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) struct rsp_que *rsp, void *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) struct multi_sts_entry_fx00 *stsmfx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) uint32_t handle, hindex, handle_count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) uint16_t que;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct req_que *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) __le32 *handle_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) stsmfx = (struct multi_sts_entry_fx00 *) pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) handle_count = stsmfx->handle_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (handle_count > MAX_HANDLE_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) ql_dbg(ql_dbg_io, vha, 0x3035,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) "Invalid handle count (0x%x).\n", handle_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) handle_ptr = &stsmfx->handles[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) for (i = 0; i < handle_count; i++) {
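/* Each handle word packs the request queue number in the upper
 * 16 bits and the outstanding-command index in the lower 16 bits. */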
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) hindex = le32_to_cpu(*handle_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) handle = LSW(hindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) que = MSW(hindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) req = ha->req_q_map[que];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) /* Validate handle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (handle < req->num_outstanding_cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) sp = req->outstanding_cmds[handle];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) sp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (sp == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) ql_dbg(ql_dbg_io, vha, 0x3044,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) "Invalid status handle (0x%x).\n", handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) qla2x00_process_completed_request(vha, req, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) handle_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * qlafx00_error_entry() - Process an error entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * @vha: SCSI driver HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * @rsp: response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) * @pkt: Entry pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) struct sts_entry_fx00 *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) const char func[] = "ERROR-IOCB";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) uint16_t que = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) struct req_que *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) int res = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) req = ha->req_q_map[que];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (sp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) sp->done(sp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * qlafx00_process_response_queue() - Process response queue entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * @vha: SCSI driver HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * @rsp: response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) qlafx00_process_response_queue(struct scsi_qla_host *vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) struct rsp_que *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) struct sts_entry_fx00 *pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) response_t *lptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) uint16_t lreq_q_in = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) uint16_t lreq_q_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) lreq_q_out = rsp->ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
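/*
 * Consume entries until the ring-out index catches up with the
 * in pointer sampled above; each entry is copied out of the
 * I/O-mapped ring before it is decoded.
 */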
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) while (lreq_q_in != lreq_q_out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) lptr = rsp->ring_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) sizeof(rsp->rsp_pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) rsp->ring_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) lreq_q_out++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) if (rsp->ring_index == rsp->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) lreq_q_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) rsp->ring_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) rsp->ring_ptr = rsp->ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) rsp->ring_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) if (pkt->entry_status != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) ql_dbg(ql_dbg_async, vha, 0x507f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) "type of error status in response: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) pkt->entry_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) qlafx00_error_entry(vha, rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) (struct sts_entry_fx00 *)pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) switch (pkt->entry_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) case STATUS_TYPE_FX00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) qlafx00_status_entry(vha, rsp, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) case STATUS_CONT_TYPE_FX00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) case MULTI_STATUS_TYPE_FX00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) qlafx00_multistatus_entry(vha, rsp, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) case ABORT_IOCB_TYPE_FX00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) qlafx00_abort_iocb_entry(vha, rsp->req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) (struct abort_iocb_entry_fx00 *)pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) case IOCTL_IOSB_TYPE_FX00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) qlafx00_ioctl_iosb_entry(vha, rsp->req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) (struct ioctl_iocb_entry_fx00 *)pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) /* Type Not Supported. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) ql_dbg(ql_dbg_async, vha, 0x5081,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) "Received unknown response pkt type %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) "entry status=%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) pkt->entry_type, pkt->entry_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) /* Adjust ring index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) * qlafx00_async_event() - Process asynchronous events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) * @vha: SCSI driver HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) qlafx00_async_event(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) struct device_reg_fx00 __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) int data_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) reg = &ha->iobase->ispfx00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) /* Decode the asynchronous event reported in AEN mailbox 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) switch (ha->aenmb[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) ql_log(ql_log_warn, vha, 0x5079,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) ql_dbg(ql_dbg_async, vha, 0x5076,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) "Asynchronous FW shutdown requested.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) ql_dbg(ql_dbg_async, vha, 0x5077,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) "Asynchronous port Update received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) data_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) ql_log(ql_log_info, vha, 0x5085,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) "Asynchronous over temperature event received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) "aenmb[0]: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) ha->aenmb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) ql_log(ql_log_info, vha, 0x5086,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) "Asynchronous normal temperature event received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) "aenmb[0]: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) ha->aenmb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) ql_log(ql_log_info, vha, 0x5083,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) "Asynchronous critical temperature event received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) "aenmb[0]: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) ha->aenmb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) ql_dbg(ql_dbg_async, vha, 0x5078,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) qlafx00_post_aenfx_work(vha, ha->aenmb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) (uint32_t *)ha->aenmb, data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) * qlafx00_mbx_completion() - Process mailbox command completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) * @vha: SCSI driver HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) * @mb0: mailbox register 0 value returned by the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) uint16_t cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) __le32 __iomem *wptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) if (!ha->mcp32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) /* Load return mailbox registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) ha->flags.mbox_int = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) ha->mailbox_out32[0] = mb0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) wptr = &reg->mailbox17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) for (cnt = 1; cnt < ha->mbx_count; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) wptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) * @irq: interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) * @dev_id: SCSI driver HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) * Called by the system whenever the host adapter generates an interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) * Returns handled flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) qlafx00_intr_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) scsi_qla_host_t *vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) struct qla_hw_data *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) struct device_reg_fx00 __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) unsigned long iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) uint32_t stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) uint32_t mb[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) struct rsp_que *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) uint32_t clr_intr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) uint32_t intr_stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) rsp = (struct rsp_que *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) if (!rsp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) ql_log(ql_log_info, NULL, 0x507d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) "%s: NULL response queue pointer.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) ha = rsp->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) reg = &ha->iobase->ispfx00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) if (unlikely(pci_channel_offline(ha->pdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) vha = pci_get_drvdata(ha->pdev);
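/*
 * Bounded polling loop: service mailbox, asynchronous-event and
 * response-queue completions, clearing only the interrupt bits
 * that were actually handled on each pass.
 */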
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) for (iter = 50; iter--; clr_intr = 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) stat = QLAFX00_RD_INTR_REG(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if (qla2x00_check_reg32_for_disconnect(vha, stat))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (!intr_stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) if (stat & QLAFX00_INTR_MB_CMPLT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) mb[0] = rd_reg_dword(&reg->mailbox16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) qlafx00_mbx_completion(vha, mb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) status |= MBX_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) clr_intr |= QLAFX00_INTR_MB_CMPLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) ha->aenmb[0] = rd_reg_dword(®->aenmailbox0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) qlafx00_async_event(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) qlafx00_process_response_queue(vha, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) clr_intr |= QLAFX00_INTR_RSP_CMPLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) QLAFX00_CLR_INTR_REG(ha, clr_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) QLAFX00_RD_INTR_REG(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) qla2x00_handle_mbx_completion(ha, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) /* QLAFX00 specific IOCB implementation functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) static inline cont_a64_entry_t *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) qlafx00_prep_cont_type1_iocb(struct req_que *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) cont_a64_entry_t *lcont_pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) cont_a64_entry_t *cont_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /* Adjust ring index. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) req->ring_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) if (req->ring_index == req->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) req->ring_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) req->ring_ptr = req->ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) req->ring_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
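/*
 * Note: the defaults below are set in the caller-supplied local copy;
 * the caller later copies the finished packet into the ring slot
 * returned here.
 */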
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) /* Load packet defaults. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) return cont_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) uint16_t avail_dsds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) struct dsd64 *cur_dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) scsi_qla_host_t *vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) struct scsi_cmnd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) int i, cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) struct req_que *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) cont_a64_entry_t lcont_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) cont_a64_entry_t *cont_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) vha = sp->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) req = vha->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) cmd = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) cont = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) cont_pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) /* Update entry type to indicate FX00 Command Type 7 IOCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) /* No data transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) lcmd_pkt->byte_count = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) /* Set transfer direction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) if (cmd->sc_data_direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) vha->qla_stats.output_bytes += scsi_bufflen(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) lcmd_pkt->cntrl_flags = TMF_READ_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) vha->qla_stats.input_bytes += scsi_bufflen(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) /* One DSD is available in the Command Type 7 IOCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) avail_dsds = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) cur_dsd = &lcmd_pkt->dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) /* Load data segments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) scsi_for_each_sg(cmd, sg, tot_dsds, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) /* Allocate additional continuation packets? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) if (avail_dsds == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) * Five DSDs are available in the Continuation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) * Type 1 IOCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) cont_pkt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) cur_dsd = lcont_pkt.dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) avail_dsds = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) cont = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) append_dsd64(&cur_dsd, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) avail_dsds--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) if (avail_dsds == 0 && cont == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) cont = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) sizeof(lcont_pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) if (avail_dsds != 0 && cont == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) sizeof(lcont_pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) * qlafx00_start_scsi() - Send a SCSI command to the ISP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) * @sp: command to send to the ISP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) * Returns non-zero if a failure occurred, else zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) qlafx00_start_scsi(srb_t *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) int nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) uint32_t handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) uint16_t cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) uint16_t req_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) uint16_t tot_dsds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) struct req_que *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) struct rsp_que *rsp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) struct scsi_cmnd *cmd = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) struct scsi_qla_host *vha = sp->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) struct cmd_type_7_fx00 *cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) struct cmd_type_7_fx00 lcmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) struct scsi_lun llun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) /* Setup device pointers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) rsp = ha->rsp_q_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) req = vha->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) /* So we know we haven't pci_map'ed anything yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) tot_dsds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) /* Acquire ring specific lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) handle = qla2xxx_get_next_handle(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) if (handle == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) goto queuing_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) /* Map the sg table so we have an accurate count of sg entries needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) if (scsi_sg_count(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) scsi_sg_count(cmd), cmd->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) if (unlikely(!nseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) goto queuing_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) nseg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) tot_dsds = nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
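/* If the cached free-entry count looks insufficient, re-read the out
 * pointer and recompute how much room is left on the request ring. */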
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (req->cnt < (req_cnt + 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) cnt = rd_reg_dword_relaxed(req->req_q_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (req->ring_index < cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) req->cnt = cnt - req->ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) req->cnt = req->length -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) (req->ring_index - cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (req->cnt < (req_cnt + 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) goto queuing_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) /* Build command packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) req->current_outstanding_cmd = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) req->outstanding_cmds[handle] = sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) sp->handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) cmd->host_scribble = (unsigned char *)(unsigned long)handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) req->cnt -= req_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)
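/* Build the command IOCB in a local copy first; it is transferred to the
 * I/O-mapped ring slot with a single memcpy_toio() below. */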
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) lcmd_pkt.handle = make_handle(req->id, sp->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) lcmd_pkt.reserved_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) lcmd_pkt.port_path_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) lcmd_pkt.reserved_1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) int_to_scsilun(cmd->device->lun, &llun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) sizeof(lcmd_pkt.lun));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) /* Load SCSI command packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) /* Build IOCB segments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) /* Set total data segment count. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) lcmd_pkt.entry_count = (uint8_t)req_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) /* Specify response queue number where completion should happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) lcmd_pkt.entry_status = (uint8_t) rsp->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) cmd->cmnd, cmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) &lcmd_pkt, sizeof(lcmd_pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) /* Adjust ring index. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) req->ring_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) if (req->ring_index == req->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) req->ring_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) req->ring_ptr = req->ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) req->ring_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) sp->flags |= SRB_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) /* Set chip new ring index. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) wrt_reg_dword(req->req_q_in, req->ring_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) queuing_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (tot_dsds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) scsi_dma_unmap(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) struct srb_iocb *fxio = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) scsi_qla_host_t *vha = sp->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) struct req_que *req = vha->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) struct tsk_mgmt_entry_fx00 tm_iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) struct scsi_lun llun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) tm_iocb.entry_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) tm_iocb.handle = make_handle(req->id, sp->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) tm_iocb.reserved_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) int_to_scsilun(fxio->u.tmf.lun, &llun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) sizeof(struct scsi_lun));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) memcpy(ptm_iocb, &tm_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) sizeof(struct tsk_mgmt_entry_fx00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) struct srb_iocb *fxio = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) scsi_qla_host_t *vha = sp->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) struct req_que *req = vha->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) struct abort_iocb_entry_fx00 abt_iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) abt_iocb.entry_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) abt_iocb.handle = make_handle(req->id, sp->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) abt_iocb.req_que_no = cpu_to_le16(req->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) memcpy(pabt_iocb, &abt_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) sizeof(struct abort_iocb_entry_fx00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)
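/*
 * qlafx00_fxdisc_iocb
 *	Build an FX00 FXDISC IOCB.  The request is either a
 *	driver-internal command (SRB_FXIOCB_DCMD) or a BSG pass-through
 *	from user space; payload scatter/gather lists that do not fit in
 *	the base entry spill into Continuation Type 1 IOCBs.
 *
 * Input:
 *	sp = srb pointer for the FXDISC request.
 *	pfxiocb = IOCB entry to populate.
 */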
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) struct srb_iocb *fxio = &sp->u.iocb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) struct bsg_job *bsg_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) struct fc_bsg_request *bsg_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) struct fxdisc_entry_fx00 fx_iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) uint8_t entry_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) fx_iocb.entry_type = FX00_IOCB_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) fx_iocb.handle = sp->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) fx_iocb.entry_count = entry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
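/*
 * SRB_FXIOCB_DCMD: driver-internal request; parameters come from the
 * srb_iocb.  Any other type is a BSG pass-through whose parameters
 * come from the bsg_job vendor command.
 */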
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) if (sp->type == SRB_FXIOCB_DCMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) fx_iocb.func_num = fxio->u.fxiocb.req_func_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) fx_iocb.req_dsdcnt = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) fx_iocb.req_xfrcnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) cpu_to_le16(fxio->u.fxiocb.req_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) &fx_iocb.dseg_rq.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) fx_iocb.dseg_rq.length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) cpu_to_le32(fxio->u.fxiocb.req_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) fx_iocb.rsp_xfrcnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) cpu_to_le16(fxio->u.fxiocb.rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) &fx_iocb.dseg_rsp.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) fx_iocb.dseg_rsp.length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) cpu_to_le32(fxio->u.fxiocb.rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) fx_iocb.dataword = fxio->u.fxiocb.req_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) fx_iocb.flags = fxio->u.fxiocb.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) bsg_job = sp->u.bsg_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) bsg_request = bsg_job->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) fx_iocb.func_num = piocb_rqst->func_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) fx_iocb.adapid = piocb_rqst->adapid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) fx_iocb.reserved_0 = piocb_rqst->reserved_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) fx_iocb.reserved_1 = piocb_rqst->reserved_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) fx_iocb.dataword = piocb_rqst->dataword;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) fx_iocb.req_xfrcnt = piocb_rqst->req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
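/*
 * Map the BSG request payload: the first DSD goes in the base entry,
 * the rest spill into Continuation Type 1 IOCBs (five DSDs each).
 */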
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) int avail_dsds, tot_dsds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) cont_a64_entry_t lcont_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) cont_a64_entry_t *cont_pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) struct dsd64 *cur_dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) int index = 0, cont = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) fx_iocb.req_dsdcnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) cpu_to_le16(bsg_job->request_payload.sg_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) tot_dsds =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) bsg_job->request_payload.sg_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) cur_dsd = &fx_iocb.dseg_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) avail_dsds = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) for_each_sg(bsg_job->request_payload.sg_list, sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) tot_dsds, index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) /* Allocate additional continuation packets? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) if (avail_dsds == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) * Five DSDs are available in the Cont.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) * Type 1 IOCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) memset(&lcont_pkt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) cont_pkt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) qlafx00_prep_cont_type1_iocb(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) sp->vha->req, &lcont_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) cur_dsd = lcont_pkt.dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) avail_dsds = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) cont = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) append_dsd64(&cur_dsd, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) avail_dsds--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) if (avail_dsds == 0 && cont == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) cont = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) memcpy_toio(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) (void __iomem *)cont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) &lcont_pkt, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) ql_dump_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) ql_dbg_user + ql_dbg_verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) sp->vha, 0x3042,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) (uint8_t *)&lcont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) if (avail_dsds != 0 && cont == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) memcpy_toio((void __iomem *)cont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) &lcont_pkt, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) sp->vha, 0x3043,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
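/* Map the BSG reply payload in the same way as the request payload. */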
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) int avail_dsds, tot_dsds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) cont_a64_entry_t lcont_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) cont_a64_entry_t *cont_pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) struct dsd64 *cur_dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) int index = 0, cont = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) fx_iocb.rsp_dsdcnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) cpu_to_le16(bsg_job->reply_payload.sg_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) tot_dsds = bsg_job->reply_payload.sg_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) cur_dsd = &fx_iocb.dseg_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) avail_dsds = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) for_each_sg(bsg_job->reply_payload.sg_list, sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) tot_dsds, index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) /* Allocate additional continuation packets? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) if (avail_dsds == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * Five DSDs are available in the Cont.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) * Type 1 IOCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) memset(&lcont_pkt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) cont_pkt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) qlafx00_prep_cont_type1_iocb(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) sp->vha->req, &lcont_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) cur_dsd = lcont_pkt.dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) avail_dsds = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) cont = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) append_dsd64(&cur_dsd, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) avail_dsds--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (avail_dsds == 0 && cont == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) cont = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) memcpy_toio((void __iomem *)cont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) &lcont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) ql_dump_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) ql_dbg_user + ql_dbg_verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) sp->vha, 0x3045,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) (uint8_t *)&lcont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) if (avail_dsds != 0 && cont == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) memcpy_toio((void __iomem *)cont_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) &lcont_pkt, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) sp->vha, 0x3046,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) fx_iocb.dataword = piocb_rqst->dataword;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) fx_iocb.flags = piocb_rqst->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) fx_iocb.entry_count = entry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }