// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
	"Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
	"Enforce IOCB throttling, to avoid FW congestion. (default: 1)");

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
	"Specify if Class 2 operations are supported from the very "
	"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
	"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
	"Maximum number of command retries to a port that returns "
	"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
	"Option to enable PLOGI to devices that are not present after "
	"a Fabric scan. This is needed for several broken switches. "
	"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
	"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
	"Option to enable allocation of memory for a firmware dump "
	"during HBA initialization. Memory allocation requirements "
	"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
	"Option to enable extended error logging,\n"
	"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
	"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
	"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
	"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
	"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
	"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
	"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
	"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
	"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
	"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
	"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
	"\t\t0x1e400000 - Preferred value for capturing essential "
	"debug information (equivalent to old "
	"ql2xextended_error_logging=1).\n"
	"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
	"Set to control shifting of command type processing "
	"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
	"Enables FDMI registrations. "
	"0 - no FDMI registrations. "
	"1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH 64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
	"Maximum queue depth to set for each LUN. "
	"Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
	" Enable T10-CRC-DIF:\n"
	" Default is 2.\n"
	" 0 -- No DIF Support\n"
	" 1 -- Enable DIF for all types\n"
	" 2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
	"Enables NVME support. "
	"0 - no NVMe. 1 - NVMe enabled. "
	"Default is 1 when CONFIG_NVME_FC is enabled, otherwise 0.");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
	" Enable T10-CRC-DIF Error isolation by HBA:\n"
	" Default is 2.\n"
	" 0 -- Error isolation disabled\n"
	" 1 -- Error isolation enabled only for DIX Type 0\n"
	" 2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
	"Enables iIDMA settings. "
	"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
	"Enable on demand multiple queue pairs support. "
	"Default is 1 for supported. "
	"Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
	"Option to specify location from which to load ISP firmware:\n"
	" 2 -- load firmware via the request_firmware() (hotplug) interface.\n"
	" 1 -- load firmware from flash.\n"
	" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
	"Enables firmware ETS burst. "
	"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
	"Option to specify scheme for request queue posting.\n"
	" 0 -- Regular doorbell.\n"
	" 1 -- CAMRAM doorbell (faster).\n");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
	"Enables GFF_ID checks of port type. "
	"Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
	"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
	"Default is 1 - Issue TM IOCBs via IOCB mechanism. "
	"0 - Issue TM IOCBs via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
	"Option to specify reset behaviour.\n"
	" 0 (Default) -- Reset on failure.\n"
	" 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
	"Defines the maximum LU number to register with the SCSI "
	"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
	"Set the Minidump driver capture mask level. "
	"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
	"Enable/disable MiniDump. "
	"0 - MiniDump disabled. "
	"1 (Default) - MiniDump enabled.");

int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
	"Number of extended Logins. "
	"0 (Default) - Disabled.");

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
	"Number of target exchanges.");

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
	"Number of initiator exchanges.");

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
	"Allow FW to hold status IOCB until ABTS rsp received. "
	"0 (Default) Do not set fw option. "
	"1 - Set fw option to hold ABTS.");

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
	"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ. "
	"0 - Do not move IOCBs. "
	"1 (Default) - Move IOCBs.");

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
	"Detect SFP range and set appropriate distance.\n"
	"1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
	"Set to enable MSI or MSI-X interrupt mechanism.\n"
	" Default is 1, enable MSI-X interrupt mechanism.\n"
	" 0 -- enable traditional pin-based mechanism.\n"
	" 1 -- enable MSI-X interrupt mechanism.\n"
	" 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
	"Reserve 1/2 of emergency exchanges for ELS.\n"
	" 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
	"Override DIF/DIX protection capabilities mask\n"
	"Default is 0 which sets protection mask based on "
	"capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
	" 0 -- Let HBA firmware decide\n"
	" 1 -- Force T10 CRC\n"
	" 2 -- Force IP checksum\n");

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
	"Force using internal buffers for DIF information\n"
	"0 (Default). Based on check.\n"
	"1 Force using internal buffers\n");

int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
	"Send SmartSAN Management Attributes for FDMI Registration."
	" Default is 0 - No SmartSAN registration,"
	" 1 - Register SmartSAN Management Attributes.");

int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
	"Enables RDP responses. "
	"0 - no RDP responses (default). "
	"1 - provide RDP responses.");

static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);


static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

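/*
 * Arm the per-host timer: schedule qla2x00_timer() to run after
 * 'interval' seconds and mark the timer as active.
 */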
__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	timer_setup(&vha->timer, qla2x00_timer, 0);
	vha->timer.expires = jiffies + interval * HZ;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
	struct qla_qpair *qpair);

/* -------------------------------------------------------------------------- */
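/*
 * Wire up the pre-allocated base queue pair to request/response queue 0,
 * the hardware lock and the default response-queue MSI-X vector, and seed
 * its CPU affinity with the current CPU (adjusted later at run time).
 */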
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
	struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;

	rsp->qpair = ha->base_qpair;
	rsp->req = req;
	ha->base_qpair->hw = ha;
	ha->base_qpair->req = req;
	ha->base_qpair->rsp = rsp;
	ha->base_qpair->vha = vha;
	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
	ha->base_qpair->srb_mempool = ha->srb_mempool;
	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
	ha->base_qpair->enable_class_2 = ql2xenableclass2;
	/* init qpair to this cpu. Will adjust at run time. */
	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	ha->base_qpair->pdev = ha->pdev;

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}

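/*
 * Allocate the request/response queue pointer arrays, the base queue pair
 * and (when MQ or NVMe is enabled) the queue pair map, then record queue 0
 * in the qid maps so it can be freed if a later probe step fails.
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */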
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
	struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
	    GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
	    GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
	if (ha->base_qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x00e0,
		    "Failed to allocate base queue pair memory.\n");
		goto fail_base_qpair;
	}

	qla_init_base_qpair(vha, req, rsp);

	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
		    GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 0;

fail_qpair_map:
	kfree(ha->base_qpair);
	ha->base_qpair = NULL;
fail_base_qpair:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

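/*
 * Release a request queue: free its DMA ring (FX00 or standard layout),
 * the outstanding command array and the queue structure itself.
 */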
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (IS_QLAFX00(ha)) {
		if (req && req->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (req->length_fx00 + 1) * sizeof(request_t),
			    req->ring_fx00, req->dma_fx00);
	} else if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
}

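/*
 * Release a response queue: free its DMA ring (FX00 or standard layout)
 * and the queue structure itself.
 */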
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (IS_QLAFX00(ha)) {
		if (rsp && rsp->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (rsp->length_fx00 + 1) * sizeof(request_t),
			    rsp->ring_fx00, rsp->dma_fx00);
	} else if (rsp && rsp->ring) {
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);
	}
	kfree(rsp);
}

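/*
 * Release the queue pair map and base queue pair, then tear down every
 * request and response queue recorded in the qid maps (dropping the
 * hardware lock around each per-queue free) and free the pointer arrays.
 */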
static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;
	unsigned long flags;

	if (ha->queue_pair_map) {
		kfree(ha->queue_pair_map);
		ha->queue_pair_map = NULL;
	}
	if (ha->base_qpair) {
		kfree(ha->base_qpair);
		ha->base_qpair = NULL;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		if (!test_bit(cnt, ha->req_qid_map))
			continue;

		req = ha->req_q_map[cnt];
		clear_bit(cnt, ha->req_qid_map);
		ha->req_q_map[cnt] = NULL;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_req_que(ha, req);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->req_q_map);
	ha->req_q_map = NULL;


	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		if (!test_bit(cnt, ha->rsp_qid_map))
			continue;

		rsp = ha->rsp_q_map[cnt];
		clear_bit(cnt, ha->rsp_qid_map);
		ha->rsp_q_map[cnt] = NULL;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_rsp_que(ha, rsp);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

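/*
 * Format a human-readable PCI/PCI-X bus mode string (e.g. "PCI-X (100 MHz)")
 * from the cached ha->pci_attr bits.
 */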
static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	struct qla_hw_data *ha = vha->hw;
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		snprintf(str, str_len, "PCI-X (%s MHz)",
		    pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
	}

	return str;
}

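/*
 * ISP24xx and later variant: report PCIe link speed and width when the
 * device is PCIe, otherwise fall back to PCI/PCI-X mode decoding from
 * ha->pci_attr.
 */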
static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;

	if (pci_is_pcie(ha->pdev)) {
		uint32_t lstat, lspeed, lwidth;
		const char *speed_str;

		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

		switch (lspeed) {
		case 1:
			speed_str = "2.5GT/s";
			break;
		case 2:
			speed_str = "5.0GT/s";
			break;
		case 3:
			speed_str = "8.0GT/s";
			break;
		case 4:
			speed_str = "16.0GT/s";
			break;
		default:
			speed_str = "<unknown>";
			break;
		}
		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);

		return str;
	}

	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8)
		snprintf(str, str_len, "PCI (%s MHz)",
		    pci_bus_modes[pci_bus >> 3]);
	else
		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
		    pci_bus & 4 ? 2 : 1,
		    pci_bus_modes[pci_bus & 3]);

	return str;
}

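/*
 * Build the firmware version string: "major.minor.subminor" followed by a
 * short attribute suffix decoded from ha->fw_attributes.
 */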
static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

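/*
 * ISP24xx+ variant: firmware version plus the raw attribute word in hex.
 */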
static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

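/*
 * Undo the DMA mappings and per-command allocations attached to an SRB:
 * data and protection scatterlists, DSD lists, CRC context and FCP_CMND
 * buffer, clearing the corresponding sp->flags bits as each is released.
 */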
void qla2x00_sp_free_dma(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
	}
}

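/*
 * Complete a SCSI command: release the SRB resources, post the result to
 * the midlayer and wake any waiter sleeping on sp->comp.
 */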
void qla2x00_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}

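/*
 * Queue-pair variant of qla2x00_sp_free_dma(); additionally releases the
 * DIF bundling DSD and DMA handle lists used when internal DIF buffers
 * are in effect.
 */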
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) void qla2xxx_qpair_sp_free_dma(srb_t *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct scsi_cmnd *cmd = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct qla_hw_data *ha = sp->fcport->vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (sp->flags & SRB_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) scsi_dma_unmap(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) sp->flags &= ~SRB_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) scsi_prot_sg_count(cmd), cmd->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) /* List assured to be having elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct crc_context *difctx = sp->u.scmd.crc_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct dsd_dma *dif_dsd, *nxt_dsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) list_for_each_entry_safe(dif_dsd, nxt_dsd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) &difctx->ldif_dma_hndl_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) list_del(&dif_dsd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) dif_dsd->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) kfree(dif_dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) difctx->no_dif_bundl--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) list_for_each_entry_safe(dif_dsd, nxt_dsd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) &difctx->ldif_dsd_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) list_del(&dif_dsd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) dif_dsd->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) kfree(dif_dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) difctx->no_ldif_dsd--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (difctx->no_ldif_dsd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) "%s: difctx->no_ldif_dsd=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) __func__, difctx->no_ldif_dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (difctx->no_dif_bundl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) "%s: difctx->no_dif_bundl=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) __func__, difctx->no_dif_bundl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) ctx1->fcp_cmnd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) mempool_free(ctx1, ha->ctx_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
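/*
 * qla2xxx_qpair_sp_compl
 *	Complete a qpair-based SCSI command: free its DMA resources,
 *	post the result to the midlayer via scsi_done() and wake up
 *	any waiter (e.g. an abort in progress) through sp->comp.
 */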
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct scsi_cmnd *cmd = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct completion *comp = sp->comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) sp->free(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) cmd->result = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) CMD_SP(cmd) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) complete(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
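/*
 * qla2xxx_queuecommand
 *	scsi_host_template .queuecommand entry point. Validates the
 *	rport/fcport state, builds an srb for the command and hands it
 *	to the firmware via isp_ops->start_scsi(). When multiqueue is
 *	enabled the command is redirected to the queue pair mapped to
 *	its blk-mq hardware queue (see qla2xxx_mqueuecommand()).
 */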
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) scsi_qla_host_t *vha = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) WARN_ON_ONCE(!rport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
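/* With MQ enabled, dispatch on the qpair mapped to this command's hw queue. */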
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (ha->mqenable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) uint32_t tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) uint16_t hwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct qla_qpair *qpair = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) tag = blk_mq_unique_tag(cmd->request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) hwq = blk_mq_unique_tag_to_hwq(tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) qpair = ha->queue_pair_map[hwq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (qpair)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return qla2xxx_mqueuecommand(host, cmd, qpair);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (ha->flags.eeh_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (ha->flags.pci_channel_io_perm_failure) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ql_dbg(ql_dbg_aer, vha, 0x9010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) "PCI Channel IO permanent failure, exiting "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) "cmd=%p.\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ql_dbg(ql_dbg_aer, vha, 0x9011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cmd->result = DID_REQUEUE << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) rval = fc_remote_port_chkready(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) cmd->result = rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) cmd, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!vha->flags.difdix_supported &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ql_dbg(ql_dbg_io, vha, 0x3004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ql_dbg(ql_dbg_io, vha, 0x3005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) "Returning DNC, fcport_state=%d loop_state=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) atomic_read(&fcport->state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) atomic_read(&base_vha->loop_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) goto qc24_target_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * Return target busy if we've received a non-zero retry_delay_timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * in a FCP_RSP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (fcport->retry_delay_timestamp == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* retry delay not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) } else if (time_after(jiffies, fcport->retry_delay_timestamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) fcport->retry_delay_timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) goto qc24_target_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) sp = scsi_cmd_priv(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) sp->u.scmd.cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) sp->type = SRB_SCSI_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) CMD_SP(cmd) = (void *)sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) sp->free = qla2x00_sp_free_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) sp->done = qla2x00_sp_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) rval = ha->isp_ops->start_scsi(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) goto qc24_host_busy_free_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) qc24_host_busy_free_sp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) sp->free(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) qc24_target_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return SCSI_MLQUEUE_TARGET_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) qc24_fail_command:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* For MQ supported I/O */
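/*
 * Called from qla2xxx_queuecommand() when a queue pair is mapped for the
 * command's hardware queue. Mirrors the single-queue path, but uses the
 * qpair-specific free/completion callbacks and start_scsi_mq().
 */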
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct qla_qpair *qpair)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) scsi_qla_host_t *vha = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) cmd->result = rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) cmd, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ql_dbg(ql_dbg_io, vha, 0x3077,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) "Returning DNC, fcport_state=%d loop_state=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) atomic_read(&fcport->state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) atomic_read(&base_vha->loop_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) goto qc24_fail_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) goto qc24_target_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * Return target busy if we've received a non-zero retry_delay_timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * in a FCP_RSP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (fcport->retry_delay_timestamp == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /* retry delay not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) } else if (time_after(jiffies, fcport->retry_delay_timestamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) fcport->retry_delay_timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) goto qc24_target_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) sp = scsi_cmd_priv(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) qla2xxx_init_sp(sp, vha, qpair, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) sp->u.scmd.cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) sp->type = SRB_SCSI_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) CMD_SP(cmd) = (void *)sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) sp->free = qla2xxx_qpair_sp_free_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) sp->done = qla2xxx_qpair_sp_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) rval = ha->isp_ops->start_scsi_mq(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto qc24_host_busy_free_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) qc24_host_busy_free_sp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) sp->free(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) qc24_target_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return SCSI_MLQUEUE_TARGET_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) qc24_fail_command:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * qla2x00_eh_wait_on_command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * Waits, up to a maximum time, for the command to be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * by the firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * cmd = Scsi Command to wait on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * Completed in time : QLA_SUCCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * Did not complete in time : QLA_FUNCTION_FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) #define ABORT_POLLING_PERIOD 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
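/* Poll every ABORT_POLLING_PERIOD ms, for roughly two seconds in total. */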
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) unsigned long wait_iter = ABORT_WAIT_ITER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) scsi_qla_host_t *vha = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int ret = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ql_dbg(ql_dbg_taskm, vha, 0x8005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) "Return:eh_wait.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) while (CMD_SP(cmd) && wait_iter--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) msleep(ABORT_POLLING_PERIOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (CMD_SP(cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ret = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * qla2x00_wait_for_hba_online
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * Wait until the HBA comes online (after at most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * MAX_RETRIES_OF_ISP_ABORT ISP abort retries) or until the HBA is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * finally disabled, i.e. marked offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * ha - pointer to host adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * May sleep (does context switching); release any spinlocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * held before calling this routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * Success (Adapter is online) : 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * Failed (Adapter is offline/disabled) : 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int return_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) unsigned long wait_online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ha->dpc_active) && time_before(jiffies, wait_online)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (base_vha->flags.online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return_status = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return_status = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return (return_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
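/*
 * Used as the wait_event condition in qla2x00_wait_for_sess_deletion():
 * returns true once every fcport on this vha has been removed
 * (fcport_count has dropped to zero).
 */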
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static inline int test_fcport_count(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) spin_lock_irqsave(&ha->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ql_dbg(ql_dbg_init, vha, 0x00ec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) "tgt %p, fcport_count=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) vha, vha->fcport_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) res = (vha->fcport_count == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * qla2x00_wait_for_sess_deletion() can only be called from remove_one().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * It relies on the UNLOADING flag being set to stop device discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) u8 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) qla2x00_mark_all_devices_lost(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (wait_event_timeout(vha->fcport_waitQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) test_fcport_count(vha), HZ) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) flush_workqueue(vha->hw->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * qla2x00_wait_for_hba_ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Wait until the HBA is ready before unloading the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * ha - pointer to host adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * May sleep (does context switching); release any spinlocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * held before calling this routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) while ((qla2x00_reset_active(vha) || ha->dpc_active ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ha->flags.mbox_busy) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (test_bit(UNLOADING, &base_vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
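/*
 * qla2x00_wait_for_chip_reset
 *	Wait, for up to MAX_LOOP_TIMEOUT seconds, for any in-progress ISP
 *	abort to finish and the chip reset to complete.
 *
 * Returns:
 *	QLA_SUCCESS if chip_reset_done is set, QLA_FUNCTION_FAILED otherwise.
 */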
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) int return_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) unsigned long wait_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ha->dpc_active) && time_before(jiffies, wait_reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ha->flags.chip_reset_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (ha->flags.chip_reset_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return_status = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return_status = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return return_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
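/* An all-ones readback means the PCI device is no longer responding. */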
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) #define ISP_REG_DISCONNECT 0xffffffffU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * qla2x00_isp_reg_stat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * Read the host status register of ISP before aborting the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * ha = pointer to host adapter structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * Either true or false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * Note: Returns true if there is a register disconnect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (IS_P3P_TYPE(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return ((rd_reg_dword(&reg->host_status)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ISP_REG_DISCONNECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * qla2xxx_eh_abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * The abort function will abort the specified command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * cmd = Linux SCSI command packet to be aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * Either SUCCESS or FAILED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * Only return FAILED if command not returned by firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) qla2xxx_eh_abort(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) scsi_qla_host_t *vha = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) DECLARE_COMPLETION_ONSTACK(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) unsigned int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) uint64_t lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) uint32_t ratov_j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct qla_qpair *qpair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) int fast_fail_status = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (qla2x00_isp_reg_stat(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ql_log(ql_log_info, vha, 0x8042,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) "PCI/Register disconnect, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* Save any FAST_IO_FAIL value to return later if abort succeeds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ret = fc_block_scsi_eh(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) fast_fail_status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) sp = scsi_cmd_priv(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) qpair = sp->qpair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if ((sp->fcport && sp->fcport->deleted) || !qpair)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) spin_lock_irqsave(qpair->qp_lock_ptr, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) sp->comp = &comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) id = cmd->device->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) lun = cmd->device->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ql_dbg(ql_dbg_taskm, vha, 0x8002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) vha->host_no, id, lun, sp, cmd, sp->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * The abort will release the original command/sp from the firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * Let the original command call scsi_done(); in doing so, it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * wake up this sleeping thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) rval = ha->isp_ops->abort_command(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) ql_dbg(ql_dbg_taskm, vha, 0x8003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /* Wait for the command completion. */
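/*
 * The wait below is bounded by 4 * R_A_TOV; ha->r_a_tov is kept in
 * 100 ms units, so r_a_tov / 10 is R_A_TOV in seconds.
 */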
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ratov_j = ha->r_a_tov/10 * 4 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) ratov_j = msecs_to_jiffies(ratov_j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) switch (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) case QLA_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!wait_for_completion_timeout(&comp, ratov_j)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ql_dbg(ql_dbg_taskm, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) __func__, ha->r_a_tov/10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ret = fast_fail_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) sp->comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) ql_log(ql_log_info, vha, 0x801c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) vha->host_no, id, lun, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
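/*
 * qla2x00_eh_wait_for_pending_commands
 *	Walk this vha's outstanding command array and, for every SCSI
 *	command matching the requested nexus (host, target or LUN),
 *	wait for the firmware to return it.
 */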
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) uint64_t l, enum nexus_wait_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) int cnt, match, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct req_que *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct scsi_cmnd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) status = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) req = vha->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) for (cnt = 1; status == QLA_SUCCESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) cnt < req->num_outstanding_cmds; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) sp = req->outstanding_cmds[cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (!sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (sp->type != SRB_SCSI_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (vha->vp_idx != sp->vha->vp_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) match = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) cmd = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) case WAIT_HOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) match = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) case WAIT_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) match = cmd->device->id == t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) case WAIT_LUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) match = (cmd->device->id == t &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) cmd->device->lun == l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (!match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) status = qla2x00_eh_wait_on_command(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
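/* Failure descriptions indexed by the 'err' step counter below. */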
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static char *reset_errors[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) "HBA not online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) "HBA not ready",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) "Task management failed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) "Waiting for command completions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
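/*
 * Common body of the device and target reset handlers: block for
 * transport recovery, wait for the HBA to come online, invoke the
 * supplied reset callback and then wait for commands pending on the
 * affected nexus to complete.
 */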
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) scsi_qla_host_t *vha = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) err = fc_block_scsi_eh(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (fcport->deleted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ql_log(ql_log_info, vha, 0x8009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) cmd->device->id, cmd->device->lun, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ql_log(ql_log_warn, vha, 0x800a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) "Wait for hba online failed for cmd=%p.\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) goto eh_reset_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) err = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (do_reset(fcport, cmd->device->lun, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ql_log(ql_log_warn, vha, 0x800c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) "do_reset failed for cmd=%p.\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) goto eh_reset_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) err = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) cmd->device->lun, type) != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) ql_log(ql_log_warn, vha, 0x800d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) "wait for pending cmds failed for cmd=%p.\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) goto eh_reset_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ql_log(ql_log_info, vha, 0x800e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) vha->host_no, cmd->device->id, cmd->device->lun, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) eh_reset_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ql_log(ql_log_info, vha, 0x800f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
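/* SCSI midlayer LUN (device) reset handler: resets a single LUN via isp_ops->lun_reset. */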
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) scsi_qla_host_t *vha = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (qla2x00_isp_reg_stat(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ql_log(ql_log_info, vha, 0x803e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) "PCI/Register disconnect, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) ha->isp_ops->lun_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
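/* SCSI midlayer target reset handler: resets the target via isp_ops->target_reset. */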
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) scsi_qla_host_t *vha = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (qla2x00_isp_reg_stat(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ql_log(ql_log_info, vha, 0x803f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) "PCI/Register disconnect, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) ha->isp_ops->target_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * qla2xxx_eh_bus_reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * The bus reset function will reset the bus and abort any executing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * cmd = Linux SCSI command packet of the command that caused the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * bus reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * SUCCESS or FAILED (defined as macros in scsi.h).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) scsi_qla_host_t *vha = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) unsigned int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) uint64_t lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (qla2x00_isp_reg_stat(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) ql_log(ql_log_info, vha, 0x8040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) "PCI/Register disconnect, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) id = cmd->device->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) lun = cmd->device->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ret = fc_block_scsi_eh(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (qla2x00_chip_is_down(vha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ql_log(ql_log_info, vha, 0x8012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) ql_log(ql_log_fatal, vha, 0x8013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) "Wait for hba online failed board disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) goto eh_bus_reset_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (ret == FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) goto eh_bus_reset_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /* Flush outstanding commands. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) ql_log(ql_log_warn, vha, 0x8014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) "Wait for pending commands failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) eh_bus_reset_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ql_log(ql_log_warn, vha, 0x802b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) "BUS RESET %s nexus=%ld:%d:%llu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * qla2xxx_eh_host_reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * The reset function will reset the Adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * cmd = Linux SCSI command packet of the command that caused the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * adapter reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * Either SUCCESS or FAILED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) scsi_qla_host_t *vha = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) unsigned int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) uint64_t lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (qla2x00_isp_reg_stat(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ql_log(ql_log_info, vha, 0x8041,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) "PCI/Register disconnect, exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) schedule_work(&ha->board_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) id = cmd->device->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) lun = cmd->device->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ql_log(ql_log_info, vha, 0x8018,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * No point in issuing another reset if one is active. Also do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * attempt a reset if we are updating flash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) goto eh_host_reset_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (vha != base_vha) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (qla2x00_vp_abort_isp(vha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) goto eh_host_reset_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (IS_P3P_TYPE(vha->hw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (!qla82xx_fcoe_ctx_reset(vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* Ctx reset success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) goto eh_host_reset_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /* fall thru if ctx reset failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (ha->wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) flush_workqueue(ha->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (ha->isp_ops->abort_isp(base_vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* failed; schedule dpc to retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) ql_log(ql_log_warn, vha, 0x802a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) "wait for hba online failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) goto eh_host_reset_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /* Wait for commands to be returned to the OS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) eh_host_reset_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) ql_log(ql_log_info, vha, 0x8017,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * qla2x00_loop_reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * Issue loop reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * vha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * 0 = success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) qla2x00_loop_reset(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) atomic_set(&vha->loop_state, LOOP_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) qla2x00_mark_all_devices_lost(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ret = qla2x00_full_login_lip(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (ret != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ql_dbg(ql_dbg_taskm, vha, 0x802d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) "full_login_lip=%d.\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (ha->flags.enable_lip_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) ret = qla2x00_lip_reset(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (ret != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) ql_dbg(ql_dbg_taskm, vha, 0x802e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) "lip_reset failed (%d).\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) /* Issue marker command only when we are going to start the I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) vha->marker_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * The caller must ensure that no completion interrupts will happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * while this function is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) __releases(qp->qp_lock_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) __acquires(qp->qp_lock_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) DECLARE_COMPLETION_ONSTACK(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) scsi_qla_host_t *vha = qp->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct scsi_cmnd *cmd = GET_CMD_SP(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) bool ret_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) uint32_t ratov_j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) lockdep_assert_held(qp->qp_lock_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (qla2x00_chip_is_down(vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) sp->done(sp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
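/*
 * Descriptive note (added): NVMe SRBs, and SCSI SRBs while the adapter is
 * still reachable (no EEH error, no ISP abort in progress, registers
 * responding), are aborted through the firmware and waited for below;
 * anything else is completed immediately with the requested result.
 */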
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) !qla2x00_isp_reg_stat(ha))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (sp->comp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) sp->done(sp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) sp->comp = &comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) rval = ha->isp_ops->abort_command(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /* Wait for command completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) ret_cmd = false;
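/*
 * Descriptive note (added): bound the wait below to 4 * R_A_TOV.
 * ha->r_a_tov appears to be kept in 100 ms units (hence the /10 to get
 * seconds, matching the R_A_TOV value logged on timeout); the product is
 * in milliseconds and is then converted to jiffies.
 */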
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ratov_j = ha->r_a_tov/10 * 4 * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) ratov_j = msecs_to_jiffies(ratov_j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) switch (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) case QLA_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (wait_for_completion_timeout(&comp, ratov_j)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) ql_dbg(ql_dbg_taskm, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) __func__, ha->r_a_tov/10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) ret_cmd = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /* else FW returns SP to driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ret_cmd = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) spin_lock_irqsave(qp->qp_lock_ptr, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (ret_cmd && blk_mq_request_started(cmd->request))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) sp->done(sp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) sp->done(sp, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * The caller must ensure that no completion interrupts will happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * while this function is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) scsi_qla_host_t *vha = qp->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct req_que *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) struct qla_tgt_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (!ha->req_q_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) spin_lock_irqsave(qp->qp_lock_ptr, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) req = qp->req;
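/*
 * Descriptive note (added): walk every in-flight command on this queue
 * pair. Outstanding-command handles are handed out starting at 1 (slot 0
 * is not used), so the scan starts at index 1 as well.
 */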
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) sp = req->outstanding_cmds[cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (sp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) switch (sp->cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) case TYPE_SRB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) qla2x00_abort_srb(qp, sp, res, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) case TYPE_TGT_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (!vha->hw->tgt.tgt_ops || !tgt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) qla_ini_mode_enabled(vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) cmd = (struct qla_tgt_cmd *)sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) cmd->aborted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) case TYPE_TGT_TMCMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /* Skip task management functions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) req->outstanding_cmds[cnt] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * The caller must ensure that no completion interrupts will happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * while this function is in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) int que;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* Continue only if initialization complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (!ha->base_qpair)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) __qla2x00_abort_all_cmds(ha->base_qpair, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (!ha->queue_pair_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) for (que = 0; que < ha->max_qpairs; que++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (!ha->queue_pair_map[que])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) qla2xxx_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (!rport || fc_remote_port_chkready(rport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) sdev->hostdata = *(fc_port_t **)rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) qla2xxx_slave_configure(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) scsi_qla_host_t *vha = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct req_que *req = vha->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
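/*
 * Descriptive note (added): T10 PI capable adapters are given a stricter
 * DMA alignment: data buffers must be 8-byte aligned (mask 0x7),
 * presumably to satisfy the protection-information DMA path.
 */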
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (IS_T10_PI_CAPABLE(vha->hw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) scsi_change_queue_depth(sdev, req->max_q_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) qla2xxx_slave_destroy(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) sdev->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * @ha: HA context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * supported addressing method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) qla2x00_config_dma_addressing(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /* Assume a 32bit DMA mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) ha->flags.enable_64bit_addressing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
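/*
 * Descriptive note (added): try a 64-bit streaming DMA mask first.
 * 64-bit addressing is only enabled when the platform actually needs
 * addresses above 4 GB (the upper dword of the required mask is non-zero)
 * and a matching 64-bit coherent mask can also be set; otherwise fall
 * back to 32-bit masks below.
 */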
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) /* Any upper-dword bits set? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) /* Ok, a 64bit DMA mask is applicable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) ha->flags.enable_64bit_addressing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) qla2x00_enable_intrs(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) ha->interrupts_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /* enable risc and host interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) rd_reg_word(&reg->ictrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) qla2x00_disable_intrs(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) ha->interrupts_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /* disable risc and host interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) wrt_reg_word(&reg->ictrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) rd_reg_word(&reg->ictrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) qla24xx_enable_intrs(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) ha->interrupts_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) rd_reg_dword(&reg->ictrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) qla24xx_disable_intrs(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (IS_NOPOLLING_TYPE(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ha->interrupts_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) wrt_reg_dword(&reg->ictrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) rd_reg_dword(&reg->ictrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) qla2x00_iospace_config(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) resource_size_t pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) uint16_t msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (pci_request_selected_regions(ha->pdev, ha->bars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) QLA2XXX_DRIVER_NAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (!(ha->bars & 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) goto skip_pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /* We only need PIO for Flash operations on ISP2312 v2 chips. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) pio = pci_resource_start(ha->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) "Invalid pci I/O region size (%s).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) pio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) "Region #0 not a PIO resource (%s).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) pio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) ha->pio_address = pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) "PIO address=%llu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) (unsigned long long)ha->pio_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) skip_pio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* Use MMIO operations for all accesses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) "Region #1 not an MMIO resource (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) "Invalid PCI mem region size (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (!ha->iobase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) "Cannot remap MMIO (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /* Determine queue resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) ha->max_req_queues = ha->max_rsp_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) ha->msix_count = QLA_BASE_VECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) /* Check if FW supports MQ or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (!(ha->fw_attributes & BIT_6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) goto mqiobase_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (!ql2xmqsupport || !ql2xnvmeenable ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) goto mqiobase_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) pci_resource_len(ha->pdev, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (ha->mqiobase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) "MQIO Base=%p.\n", ha->mqiobase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) /* Read MSIX vector size of the board */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
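/*
 * Descriptive note (added): the MSI-X table size is encoded as N-1 in
 * the control word, hence the +1 to get the supported vector count.
 */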
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) ha->msix_count = msix + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /* Max queues are bounded by available msix vectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) /* MB interrupt uses 1 vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) ha->max_req_queues = ha->msix_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) ha->max_rsp_queues = ha->max_req_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) /* The queue pair count is the max value minus the base queue pair */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) ha->max_qpairs = ha->max_rsp_queues - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) "Max no of queue pairs: %d.\n", ha->max_qpairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) ql_log_pci(ql_log_info, ha->pdev, 0x001a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) "MSI-X vector count: %d.\n", ha->msix_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) ql_log_pci(ql_log_info, ha->pdev, 0x001b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) "BAR 3 not enabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) mqiobase_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) "MSIX Count: %d.\n", ha->msix_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return (0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) iospace_error_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) return (-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) qla83xx_iospace_config(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) uint16_t msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (pci_request_selected_regions(ha->pdev, ha->bars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) QLA2XXX_DRIVER_NAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) /* Use MMIO operations for all accesses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) "Invalid pci I/O region size (%s).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) "Invalid PCI mem region size (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (!ha->iobase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) "Cannot remap MMIO (%s), aborting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) pci_name(ha->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) goto iospace_error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* 64bit PCI BAR - BAR2 will correspond to region 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) /* 83XX/26XX always use MQ type access for queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * - mbar 2, a.k.a. region 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) ha->max_req_queues = ha->max_rsp_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) ha->msix_count = QLA_BASE_VECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) pci_resource_len(ha->pdev, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (!ha->mqiobase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) "BAR2/region4 not enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) goto mqiobase_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) pci_resource_len(ha->pdev, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (ha->msixbase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /* Read MSIX vector size of the board */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) pci_read_config_word(ha->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) QLA_83XX_PCI_MSIX_CONTROL, &msix);
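/*
 * Descriptive note (added): PCI_MSIX_FLAGS_QSIZE holds the MSI-X table
 * size encoded as N-1, so add one to get the number of vectors the
 * function supports.
 */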
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * By default, the driver uses at least two MSI-X vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * (default & rspq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (ql2xmqsupport || ql2xnvmeenable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) /* MB interrupt uses 1 vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) ha->max_req_queues = ha->msix_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /* ATIOQ needs 1 vector. That's 1 less QPair */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (QLA_TGT_MODE_ENABLED())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) ha->max_req_queues--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ha->max_rsp_queues = ha->max_req_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* The queue pair count is the max value minus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) * the base queue pair */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) ha->max_qpairs = ha->max_req_queues - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) "Max no of queue pairs: %d.\n", ha->max_qpairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) ql_log_pci(ql_log_info, ha->pdev, 0x011c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) "MSI-X vector count: %d.\n", ha->msix_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) ql_log_pci(ql_log_info, ha->pdev, 0x011e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) "BAR 1 not enabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) mqiobase_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) "MSIX Count: %d.\n", ha->msix_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) iospace_error_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) static struct isp_operations qla2100_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) .pci_config = qla2100_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) .reset_chip = qla2x00_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) .chip_diag = qla2x00_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) .config_rings = qla2x00_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) .reset_adapter = qla2x00_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) .nvram_config = qla2x00_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) .update_fw_options = qla2x00_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) .load_risc = qla2x00_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) .pci_info_str = qla2x00_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) .fw_version_str = qla2x00_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) .intr_handler = qla2100_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .enable_intrs = qla2x00_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) .disable_intrs = qla2x00_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) .abort_command = qla2x00_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) .target_reset = qla2x00_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) .lun_reset = qla2x00_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) .fabric_login = qla2x00_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) .fabric_logout = qla2x00_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) .calc_req_entries = qla2x00_calc_iocbs_32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) .build_iocbs = qla2x00_build_scsi_iocbs_32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) .prep_ms_iocb = qla2x00_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) .read_nvram = qla2x00_read_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) .write_nvram = qla2x00_write_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) .fw_dump = qla2100_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) .beacon_on = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) .beacon_off = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) .beacon_blink = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) .read_optrom = qla2x00_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) .write_optrom = qla2x00_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) .get_flash_version = qla2x00_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) .start_scsi = qla2x00_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) .start_scsi_mq = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) .abort_isp = qla2x00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) .iospace_config = qla2x00_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static struct isp_operations qla2300_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) .pci_config = qla2300_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) .reset_chip = qla2x00_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) .chip_diag = qla2x00_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) .config_rings = qla2x00_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) .reset_adapter = qla2x00_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) .nvram_config = qla2x00_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) .update_fw_options = qla2x00_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) .load_risc = qla2x00_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) .pci_info_str = qla2x00_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) .fw_version_str = qla2x00_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) .intr_handler = qla2300_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) .enable_intrs = qla2x00_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) .disable_intrs = qla2x00_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) .abort_command = qla2x00_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) .target_reset = qla2x00_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) .lun_reset = qla2x00_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) .fabric_login = qla2x00_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) .fabric_logout = qla2x00_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) .calc_req_entries = qla2x00_calc_iocbs_32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) .build_iocbs = qla2x00_build_scsi_iocbs_32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) .prep_ms_iocb = qla2x00_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) .read_nvram = qla2x00_read_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) .write_nvram = qla2x00_write_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) .fw_dump = qla2300_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) .beacon_on = qla2x00_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) .beacon_off = qla2x00_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) .beacon_blink = qla2x00_beacon_blink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) .read_optrom = qla2x00_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) .write_optrom = qla2x00_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) .get_flash_version = qla2x00_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) .start_scsi = qla2x00_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) .start_scsi_mq = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) .abort_isp = qla2x00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) .iospace_config = qla2x00_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) static struct isp_operations qla24xx_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) .pci_config = qla24xx_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) .reset_chip = qla24xx_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) .chip_diag = qla24xx_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) .config_rings = qla24xx_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) .reset_adapter = qla24xx_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) .nvram_config = qla24xx_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) .update_fw_options = qla24xx_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) .load_risc = qla24xx_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) .pci_info_str = qla24xx_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) .fw_version_str = qla24xx_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) .intr_handler = qla24xx_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) .enable_intrs = qla24xx_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) .disable_intrs = qla24xx_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) .abort_command = qla24xx_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) .target_reset = qla24xx_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) .lun_reset = qla24xx_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) .fabric_login = qla24xx_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) .fabric_logout = qla24xx_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) .read_nvram = qla24xx_read_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) .write_nvram = qla24xx_write_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) .fw_dump = qla24xx_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) .beacon_on = qla24xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) .beacon_off = qla24xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) .beacon_blink = qla24xx_beacon_blink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) .read_optrom = qla24xx_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) .write_optrom = qla24xx_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) .get_flash_version = qla24xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) .start_scsi = qla24xx_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) .start_scsi_mq = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) .abort_isp = qla2x00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) .iospace_config = qla2x00_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) static struct isp_operations qla25xx_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) .pci_config = qla25xx_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) .reset_chip = qla24xx_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) .chip_diag = qla24xx_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) .config_rings = qla24xx_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) .reset_adapter = qla24xx_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) .nvram_config = qla24xx_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) .update_fw_options = qla24xx_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) .load_risc = qla24xx_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) .pci_info_str = qla24xx_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) .fw_version_str = qla24xx_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) .intr_handler = qla24xx_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) .enable_intrs = qla24xx_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) .disable_intrs = qla24xx_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) .abort_command = qla24xx_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) .target_reset = qla24xx_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) .lun_reset = qla24xx_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) .fabric_login = qla24xx_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) .fabric_logout = qla24xx_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) .read_nvram = qla25xx_read_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) .write_nvram = qla25xx_write_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) .fw_dump = qla25xx_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) .beacon_on = qla24xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) .beacon_off = qla24xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) .beacon_blink = qla24xx_beacon_blink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) .read_optrom = qla25xx_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) .write_optrom = qla24xx_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) .get_flash_version = qla24xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) .start_scsi = qla24xx_dif_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) .abort_isp = qla2x00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) .iospace_config = qla2x00_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) static struct isp_operations qla81xx_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) .pci_config = qla25xx_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) .reset_chip = qla24xx_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) .chip_diag = qla24xx_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) .config_rings = qla24xx_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) .reset_adapter = qla24xx_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) .nvram_config = qla81xx_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) .update_fw_options = qla24xx_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) .load_risc = qla81xx_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) .pci_info_str = qla24xx_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) .fw_version_str = qla24xx_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) .intr_handler = qla24xx_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) .enable_intrs = qla24xx_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) .disable_intrs = qla24xx_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) .abort_command = qla24xx_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) .target_reset = qla24xx_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) .lun_reset = qla24xx_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) .fabric_login = qla24xx_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) .fabric_logout = qla24xx_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) .read_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) .write_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) .fw_dump = qla81xx_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) .beacon_on = qla24xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) .beacon_off = qla24xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) .beacon_blink = qla83xx_beacon_blink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) .read_optrom = qla25xx_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) .write_optrom = qla24xx_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) .get_flash_version = qla24xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) .start_scsi = qla24xx_dif_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) .abort_isp = qla2x00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) .iospace_config = qla2x00_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static struct isp_operations qla82xx_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) .pci_config = qla82xx_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) .reset_chip = qla82xx_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) .chip_diag = qla24xx_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) .config_rings = qla82xx_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) .reset_adapter = qla24xx_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) .nvram_config = qla81xx_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) .update_fw_options = qla24xx_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) .load_risc = qla82xx_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) .pci_info_str = qla24xx_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) .fw_version_str = qla24xx_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) .intr_handler = qla82xx_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) .enable_intrs = qla82xx_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) .disable_intrs = qla82xx_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) .abort_command = qla24xx_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) .target_reset = qla24xx_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) .lun_reset = qla24xx_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) .fabric_login = qla24xx_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) .fabric_logout = qla24xx_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) .read_nvram = qla24xx_read_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) .write_nvram = qla24xx_write_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) .fw_dump = qla82xx_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) .beacon_on = qla82xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) .beacon_off = qla82xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) .beacon_blink = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) .read_optrom = qla82xx_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) .write_optrom = qla82xx_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) .get_flash_version = qla82xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) .start_scsi = qla82xx_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) .start_scsi_mq = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) .abort_isp = qla82xx_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) .iospace_config = qla82xx_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
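/*
 * ISP8044 table: same 82xx base as above, except for the 8044-specific
 * interrupt handler, firmware dump, option-ROM accessors and abort_isp,
 * and with no direct NVRAM read/write hooks.
 */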
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) static struct isp_operations qla8044_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) .pci_config = qla82xx_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) .reset_chip = qla82xx_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) .chip_diag = qla24xx_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) .config_rings = qla82xx_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) .reset_adapter = qla24xx_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) .nvram_config = qla81xx_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) .update_fw_options = qla24xx_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) .load_risc = qla82xx_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) .pci_info_str = qla24xx_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) .fw_version_str = qla24xx_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) .intr_handler = qla8044_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) .enable_intrs = qla82xx_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) .disable_intrs = qla82xx_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) .abort_command = qla24xx_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) .target_reset = qla24xx_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) .lun_reset = qla24xx_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) .fabric_login = qla24xx_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) .fabric_logout = qla24xx_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) .read_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) .write_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) .fw_dump = qla8044_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) .beacon_on = qla82xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) .beacon_off = qla82xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) .beacon_blink = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) .read_optrom = qla8044_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) .write_optrom = qla8044_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) .get_flash_version = qla82xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) .start_scsi = qla82xx_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) .start_scsi_mq = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) .abort_isp = qla8044_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) .iospace_config = qla82xx_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
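/*
 * 83xx (ISP2031/ISP8031) table: standard 24xx/25xx PCI and ring handling
 * with the 81xx NVRAM config and RISC load, the 83xx firmware dump,
 * beacon blink and iospace config, and the DIF-aware start_scsi paths.
 */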
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) static struct isp_operations qla83xx_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) .pci_config = qla25xx_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) .reset_chip = qla24xx_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) .chip_diag = qla24xx_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) .config_rings = qla24xx_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) .reset_adapter = qla24xx_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) .nvram_config = qla81xx_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) .update_fw_options = qla24xx_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) .load_risc = qla81xx_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) .pci_info_str = qla24xx_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) .fw_version_str = qla24xx_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) .intr_handler = qla24xx_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) .enable_intrs = qla24xx_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) .disable_intrs = qla24xx_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) .abort_command = qla24xx_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) .target_reset = qla24xx_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) .lun_reset = qla24xx_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) .fabric_login = qla24xx_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) .fabric_logout = qla24xx_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) .read_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) .write_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) .fw_dump = qla83xx_fw_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) .beacon_on = qla24xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) .beacon_off = qla24xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) .beacon_blink = qla83xx_beacon_blink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) .read_optrom = qla25xx_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) .write_optrom = qla24xx_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) .get_flash_version = qla24xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) .start_scsi = qla24xx_dif_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) .abort_isp = qla2x00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) .iospace_config = qla83xx_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
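/*
 * ISPFx00 table: both reset_chip and reset_adapter map to
 * qlafx00_soft_reset, and there is no NVRAM config, firmware load,
 * fabric login/logout or firmware dump hook at all; it is also the only
 * table in this group with its own initialize_adapter.
 */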
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) static struct isp_operations qlafx00_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) .pci_config = qlafx00_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) .reset_chip = qlafx00_soft_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) .chip_diag = qlafx00_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) .config_rings = qlafx00_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) .reset_adapter = qlafx00_soft_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) .nvram_config = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) .update_fw_options = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) .load_risc = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) .pci_info_str = qlafx00_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) .fw_version_str = qlafx00_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) .intr_handler = qlafx00_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) .enable_intrs = qlafx00_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) .disable_intrs = qlafx00_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) .abort_command = qla24xx_async_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) .target_reset = qlafx00_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) .lun_reset = qlafx00_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) .fabric_login = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) .fabric_logout = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) .read_nvram = qla24xx_read_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) .write_nvram = qla24xx_write_nvram_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) .fw_dump = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) .beacon_on = qla24xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) .beacon_off = qla24xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) .beacon_blink = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) .read_optrom = qla24xx_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) .write_optrom = qla24xx_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) .get_flash_version = qla24xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) .start_scsi = qlafx00_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) .start_scsi_mq = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) .abort_isp = qlafx00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) .iospace_config = qlafx00_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) .initialize_adapter = qlafx00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
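/*
 * 27xx table: like the 83xx one, but with the 27xx firmware dump and the
 * additional .mpi_fw_dump hook (the only table here that sets it).
 * qla2x00_probe_one() below also reuses this table for 28xx adapters.
 */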
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) static struct isp_operations qla27xx_isp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) .pci_config = qla25xx_pci_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) .reset_chip = qla24xx_reset_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) .chip_diag = qla24xx_chip_diag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) .config_rings = qla24xx_config_rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) .reset_adapter = qla24xx_reset_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) .nvram_config = qla81xx_nvram_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) .update_fw_options = qla24xx_update_fw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) .load_risc = qla81xx_load_risc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) .pci_info_str = qla24xx_pci_info_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) .fw_version_str = qla24xx_fw_version_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) .intr_handler = qla24xx_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) .enable_intrs = qla24xx_enable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) .disable_intrs = qla24xx_disable_intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) .abort_command = qla24xx_abort_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) .target_reset = qla24xx_abort_target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) .lun_reset = qla24xx_lun_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) .fabric_login = qla24xx_login_fabric,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) .fabric_logout = qla24xx_fabric_logout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) .calc_req_entries = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) .build_iocbs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) .prep_ms_iocb = qla24xx_prep_ms_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) .read_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) .write_nvram = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) .fw_dump = qla27xx_fwdump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) .mpi_fw_dump = qla27xx_mpi_fwdump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) .beacon_on = qla24xx_beacon_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) .beacon_off = qla24xx_beacon_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) .beacon_blink = qla83xx_beacon_blink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) .read_optrom = qla25xx_read_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) .write_optrom = qla24xx_write_optrom_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) .get_flash_version = qla24xx_get_flash_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) .start_scsi = qla24xx_dif_start_scsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) .abort_isp = qla2x00_abort_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) .iospace_config = qla83xx_iospace_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) .initialize_adapter = qla2x00_initialize_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) };
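/*
 * One of the tables above is selected per adapter in qla2x00_probe_one()
 * and stored in ha->isp_ops; the rest of the driver dispatches through
 * that table rather than testing chip types at every call site.  A
 * minimal sketch of the pattern (only the iospace_config call is shown
 * being made this way later in this file; the other hook prototypes are
 * declared in qla_def.h, and optional hooks may be NULL):
 *
 *	ret = ha->isp_ops->iospace_config(ha);
 *	if (ha->isp_ops->beacon_blink)
 *		ha->isp_ops->beacon_blink(vha);
 */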
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
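/*
 * qla2x00_set_isp_flags() derives ha->isp_type (which chip) and
 * ha->device_type (capability bits: DT_ZIO_SUPPORTED, DT_FWI2, DT_IIDMA,
 * DT_T10_PI, ...) plus the RISC firmware load address from the PCI
 * device ID.  The physical port number then comes from the low bit of
 * ha->portnum on 82xx parts and from PCI_INTERRUPT_PIN otherwise:
 * 25xx/2031/27xx/28xx use pin - 1 (so pin 2 maps to port 1), older
 * chips use !(pin & 1).
 */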
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) qla2x00_set_isp_flags(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) ha->device_type = DT_EXTENDED_IDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) switch (ha->pdev->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) case PCI_DEVICE_ID_QLOGIC_ISP2100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) ha->isp_type |= DT_ISP2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) ha->device_type &= ~DT_EXTENDED_IDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) ha->fw_srisc_address = RISC_START_ADDRESS_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) case PCI_DEVICE_ID_QLOGIC_ISP2200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) ha->isp_type |= DT_ISP2200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) ha->device_type &= ~DT_EXTENDED_IDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) ha->fw_srisc_address = RISC_START_ADDRESS_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) case PCI_DEVICE_ID_QLOGIC_ISP2300:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) ha->isp_type |= DT_ISP2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) ha->fw_srisc_address = RISC_START_ADDRESS_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) case PCI_DEVICE_ID_QLOGIC_ISP2312:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) ha->isp_type |= DT_ISP2312;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) ha->fw_srisc_address = RISC_START_ADDRESS_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) case PCI_DEVICE_ID_QLOGIC_ISP2322:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) ha->isp_type |= DT_ISP2322;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (ha->pdev->subsystem_vendor == 0x1028 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) ha->pdev->subsystem_device == 0x0170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) ha->device_type |= DT_OEM_001;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) ha->fw_srisc_address = RISC_START_ADDRESS_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) case PCI_DEVICE_ID_QLOGIC_ISP6312:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) ha->isp_type |= DT_ISP6312;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) ha->fw_srisc_address = RISC_START_ADDRESS_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) case PCI_DEVICE_ID_QLOGIC_ISP6322:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ha->isp_type |= DT_ISP6322;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) ha->fw_srisc_address = RISC_START_ADDRESS_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) case PCI_DEVICE_ID_QLOGIC_ISP2422:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) ha->isp_type |= DT_ISP2422;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) case PCI_DEVICE_ID_QLOGIC_ISP2432:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) ha->isp_type |= DT_ISP2432;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) case PCI_DEVICE_ID_QLOGIC_ISP8432:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) ha->isp_type |= DT_ISP8432;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) case PCI_DEVICE_ID_QLOGIC_ISP5422:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) ha->isp_type |= DT_ISP5422;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) case PCI_DEVICE_ID_QLOGIC_ISP5432:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) ha->isp_type |= DT_ISP5432;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) case PCI_DEVICE_ID_QLOGIC_ISP2532:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) ha->isp_type |= DT_ISP2532;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) case PCI_DEVICE_ID_QLOGIC_ISP8001:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) ha->isp_type |= DT_ISP8001;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) case PCI_DEVICE_ID_QLOGIC_ISP8021:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) ha->isp_type |= DT_ISP8021;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) /* Initialize 82XX ISP flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) qla82xx_init_flags(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) case PCI_DEVICE_ID_QLOGIC_ISP8044:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) ha->isp_type |= DT_ISP8044;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) /* Initialize 82XX ISP flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) qla82xx_init_flags(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) case PCI_DEVICE_ID_QLOGIC_ISP2031:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) ha->isp_type |= DT_ISP2031;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) ha->device_type |= DT_T10_PI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) case PCI_DEVICE_ID_QLOGIC_ISP8031:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) ha->isp_type |= DT_ISP8031;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) ha->device_type |= DT_T10_PI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) case PCI_DEVICE_ID_QLOGIC_ISPF001:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) ha->isp_type |= DT_ISPFX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) case PCI_DEVICE_ID_QLOGIC_ISP2071:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) ha->isp_type |= DT_ISP2071;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) ha->device_type |= DT_T10_PI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) case PCI_DEVICE_ID_QLOGIC_ISP2271:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) ha->isp_type |= DT_ISP2271;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) ha->device_type |= DT_T10_PI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) case PCI_DEVICE_ID_QLOGIC_ISP2261:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) ha->isp_type |= DT_ISP2261;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) ha->device_type |= DT_T10_PI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) case PCI_DEVICE_ID_QLOGIC_ISP2081:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) case PCI_DEVICE_ID_QLOGIC_ISP2089:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) ha->isp_type |= DT_ISP2081;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) ha->device_type |= DT_T10_PI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) case PCI_DEVICE_ID_QLOGIC_ISP2281:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) case PCI_DEVICE_ID_QLOGIC_ISP2289:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) ha->isp_type |= DT_ISP2281;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) ha->device_type |= DT_ZIO_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) ha->device_type |= DT_FWI2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) ha->device_type |= DT_IIDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) ha->device_type |= DT_T10_PI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) ha->fw_srisc_address = RISC_START_ADDRESS_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) if (IS_QLA82XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) ha->port_no = ha->portnum & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) /* Get adapter physical port no from interrupt pin register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) IS_QLA27XX(ha) || IS_QLA28XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) ha->port_no--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) ha->port_no = !(ha->port_no & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) ha->device_type, ha->port_no, ha->fw_srisc_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
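/*
 * SCSI midlayer scan hooks (referenced from qla2xxx_driver_template):
 * qla2xxx_scan_start() requests a loop resync / local loop / RSCN /
 * NPIV update via the dpc_flags (skipped when flags.running_gold_fw is
 * set), and qla2xxx_scan_finished() reports the scan done once the port
 * is unloading, the loop reaches LOOP_READY, or loop_reset_delay
 * seconds have passed.
 */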
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) qla2xxx_scan_start(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) scsi_qla_host_t *vha = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) if (vha->hw->flags.running_gold_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) set_bit(RSCN_UPDATE, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) scsi_qla_host_t *vha = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) if (test_bit(UNLOADING, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (!vha->host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (time > vha->hw->loop_reset_delay * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) return atomic_read(&vha->loop_state) == LOOP_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
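/*
 * Handler for vha->iocb_work: unless the base port is unloading it runs
 * qla2x00_do_work() over vha->work_list for at most two passes, then
 * clears IOCB_WORK_ACTIVE under work_lock (presumably so that a later
 * posting can schedule the work item again).
 */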
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) static void qla2x00_iocb_work_fn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) struct scsi_qla_host *vha = container_of(work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) struct scsi_qla_host, iocb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) int i = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) if (test_bit(UNLOADING, &base_vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) while (!list_empty(&vha->work_list) && i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) qla2x00_do_work(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) spin_lock_irqsave(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) spin_unlock_irqrestore(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) * PCI driver interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) */
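/*
 * Probe order, as implemented below: select BARs and enable the PCI
 * device (memory-only for the 24xx-and-later device IDs listed below),
 * allocate qla_hw_data, classify the chip with qla2x00_set_isp_flags(),
 * pick the isp_ops table and ring/mailbox sizing for the family, map
 * register space via isp_ops->iospace_config(), set up DMA addressing,
 * allocate the request/response queues with qla2x00_mem_alloc(), and
 * create the base scsi_qla_host with qla2x00_create_host().
 */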
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct Scsi_Host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) scsi_qla_host_t *base_vha = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) struct qla_hw_data *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) char pci_info[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) char fw_str[30], wq_name[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) struct scsi_host_template *sht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) int bars, mem_only = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) uint16_t req_length = 0, rsp_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) struct req_que *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) struct rsp_que *rsp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) sht = &qla2xxx_driver_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) bars = pci_select_bars(pdev, IORESOURCE_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) mem_only = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) "Mem only adapter.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) "Bars=%d.\n", bars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (mem_only) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (pci_enable_device_mem(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) if (pci_enable_device(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
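/*
 * Crash-dump (kdump) kernels run with very little memory, so multiqueue
 * support and firmware dump allocation are disabled up front.
 */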
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) if (is_kdump_kernel()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) ql2xmqsupport = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) ql2xallocfwdump = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) /* This may fail but that's ok */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) pci_enable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (!ha) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) ql_log_pci(ql_log_fatal, pdev, 0x0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) "Unable to allocate memory for ha.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) goto disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) "Memory allocated for ha=%p.\n", ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) ha->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) INIT_LIST_HEAD(&ha->tgt.q_full_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) spin_lock_init(&ha->tgt.q_full_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) spin_lock_init(&ha->tgt.sess_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) spin_lock_init(&ha->tgt.atio_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) atomic_set(&ha->nvme_active_aen_cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) /* Set up the hw data area (already zeroed by the kzalloc above) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) ha->bars = bars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) ha->mem_only = mem_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) spin_lock_init(&ha->hardware_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) spin_lock_init(&ha->vport_slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) mutex_init(&ha->selflogin_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) mutex_init(&ha->optrom_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) /* Set ISP-type information. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) qla2x00_set_isp_flags(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) /* Set EEH reset type to fundamental if required by hba */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) pdev->needs_freset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) ha->prev_topology = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) ha->init_cb_size = sizeof(init_cb_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) ha->link_data_rate = PORT_SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) ha->optrom_size = OPTROM_SIZE_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) ha->max_exchg = FW_MAX_EXCHANGES_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) atomic_set(&ha->num_pend_mbx_stage1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) atomic_set(&ha->num_pend_mbx_stage2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) atomic_set(&ha->num_pend_mbx_stage3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) /* Assign ISP specific operations. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) if (IS_QLA2100(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) req_length = REQUEST_ENTRY_CNT_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) rsp_length = RESPONSE_ENTRY_CNT_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) ha->gid_list_info_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) ha->flash_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) ha->flash_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) ha->nvram_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) ha->nvram_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) ha->isp_ops = &qla2100_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) } else if (IS_QLA2200(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) req_length = REQUEST_ENTRY_CNT_2200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) rsp_length = RESPONSE_ENTRY_CNT_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) ha->gid_list_info_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) ha->flash_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) ha->flash_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) ha->nvram_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) ha->nvram_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) ha->isp_ops = &qla2100_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) } else if (IS_QLA23XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) req_length = REQUEST_ENTRY_CNT_2200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) rsp_length = RESPONSE_ENTRY_CNT_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) ha->gid_list_info_size = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (IS_QLA2322(ha) || IS_QLA6322(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) ha->optrom_size = OPTROM_SIZE_2322;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) ha->flash_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) ha->flash_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) ha->nvram_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) ha->nvram_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) ha->isp_ops = &qla2300_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) } else if (IS_QLA24XX_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) req_length = REQUEST_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) rsp_length = RESPONSE_ENTRY_CNT_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) ha->optrom_size = OPTROM_SIZE_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) ha->isp_ops = &qla24xx_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) } else if (IS_QLA25XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) req_length = REQUEST_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) rsp_length = RESPONSE_ENTRY_CNT_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) ha->optrom_size = OPTROM_SIZE_25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) ha->isp_ops = &qla25xx_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) } else if (IS_QLA81XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) req_length = REQUEST_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) rsp_length = RESPONSE_ENTRY_CNT_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) ha->optrom_size = OPTROM_SIZE_81XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) ha->isp_ops = &qla81xx_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) ha->nvram_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) ha->nvram_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) } else if (IS_QLA82XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) req_length = REQUEST_ENTRY_CNT_82XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) rsp_length = RESPONSE_ENTRY_CNT_82XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) ha->optrom_size = OPTROM_SIZE_82XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) ha->isp_ops = &qla82xx_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) } else if (IS_QLA8044(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) req_length = REQUEST_ENTRY_CNT_82XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) rsp_length = RESPONSE_ENTRY_CNT_82XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) ha->optrom_size = OPTROM_SIZE_83XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) ha->isp_ops = &qla8044_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) } else if (IS_QLA83XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) ha->portnum = PCI_FUNC(ha->pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) req_length = REQUEST_ENTRY_CNT_83XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) rsp_length = RESPONSE_ENTRY_CNT_83XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) ha->optrom_size = OPTROM_SIZE_83XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) ha->isp_ops = &qla83xx_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) ha->nvram_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) ha->nvram_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) } else if (IS_QLAFX00(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) req_length = REQUEST_ENTRY_CNT_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) rsp_length = RESPONSE_ENTRY_CNT_FX00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) ha->isp_ops = &qlafx00_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) ha->port_down_retry_count = 30; /* default value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) ha->mr.fw_hbt_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) ha->mr.host_info_resend = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) } else if (IS_QLA27XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) ha->portnum = PCI_FUNC(ha->pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) req_length = REQUEST_ENTRY_CNT_83XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) rsp_length = RESPONSE_ENTRY_CNT_83XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) ha->optrom_size = OPTROM_SIZE_83XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) ha->isp_ops = &qla27xx_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) ha->nvram_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) ha->nvram_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) } else if (IS_QLA28XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) ha->portnum = PCI_FUNC(ha->pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) ha->mbx_count = MAILBOX_REGISTER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) req_length = REQUEST_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) rsp_length = RESPONSE_ENTRY_CNT_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) ha->gid_list_info_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) ha->optrom_size = OPTROM_SIZE_28XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) ha->isp_ops = &qla27xx_isp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) ha->nvram_conf_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) ha->nvram_data_off = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) "mbx_count=%d, req_length=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) "max_fibre_devices=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) ha->nvram_npiv_size, ha->max_fibre_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) "isp_ops=%p, flash_conf_off=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) ha->nvram_conf_off, ha->nvram_data_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) /* Configure PCI I/O space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) ret = ha->isp_ops->iospace_config(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) goto iospace_config_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) ql_log_pci(ql_log_info, pdev, 0x001d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) "Found an ISP%04X irq %d iobase 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) pdev->device, pdev->irq, ha->iobase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) mutex_init(&ha->vport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) mutex_init(&ha->mq_lock);
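^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) * mbx_cmd_comp doubles as a "mailbox interface free" gate; complete it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) * up front so the first mailbox command finds the interface available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) * instead of blocking on a completion that nothing will ever signal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) */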
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) init_completion(&ha->mbx_cmd_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) complete(&ha->mbx_cmd_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) init_completion(&ha->mbx_intr_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) init_completion(&ha->dcbx_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) init_completion(&ha->lb_portup_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
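^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) /* VP index 0 is reserved for the physical (base) port. */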
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) set_bit(0, (unsigned long *) ha->vp_idx_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) qla2x00_config_dma_addressing(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) "64 Bit addressing is %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) ha->flags.enable_64bit_addressing ? "enable" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) "disable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) ql_log_pci(ql_log_fatal, pdev, 0x0031,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) "Failed to allocate memory for adapter, aborting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) goto probe_hw_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) req->max_q_depth = MAX_Q_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) req->max_q_depth = ql2xmaxqdepth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) base_vha = qla2x00_create_host(sht, ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (!base_vha) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) goto probe_hw_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) pci_set_drvdata(pdev, base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) host = base_vha->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) base_vha->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (IS_QLA2XXX_MIDTYPE(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) base_vha->mgmt_svr_loop_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) qla2x00_reserve_mgmt_server_loop_id(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) base_vha->vp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) /* Setup fcport template structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) ha->mr.fcport.vha = base_vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) ha->mr.fcport.port_type = FCT_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) ha->mr.fcport.scan_state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) /* Set the SG table size based on ISP type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) if (!IS_FWI2_CAPABLE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) if (IS_QLA2100(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) host->sg_tablesize = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) if (!IS_QLA82XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) host->sg_tablesize = QLA_SG_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) host->max_id = ha->max_fibre_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) host->cmd_per_lun = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) host->unique_id = host->host_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) host->max_cmd_len = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) host->max_cmd_len = MAX_CMDSZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) host->max_channel = MAX_BUSES - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) /* Older HBAs support only 16-bit LUNs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) ql2xmaxlun > 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) host->max_lun = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) host->max_lun = ql2xmaxlun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) host->transportt = qla2xxx_transport_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) ql_dbg(ql_dbg_init, base_vha, 0x0033,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) "max_id=%d this_id=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) host->this_id, host->cmd_per_lun, host->unique_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) host->max_cmd_len, host->max_channel, host->max_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) host->transportt, sht->vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) /* Set up the irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) ret = qla2x00_request_irqs(ha, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) goto probe_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) /* Alloc arrays of request and response ring ptrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) ret = qla2x00_alloc_queues(ha, req, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) ql_log(ql_log_fatal, base_vha, 0x003d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) "Failed to allocate memory for queue pointers, aborting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) goto probe_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (ha->mqenable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) /* number of hardware queues supported by blk/scsi-mq*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) host->nr_hw_queues = ha->max_qpairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) ql_dbg(ql_dbg_init, base_vha, 0x0192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (ql2xnvmeenable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) host->nr_hw_queues = ha->max_qpairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) ql_dbg(ql_dbg_init, base_vha, 0x0194,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) "FC-NVMe support is enabled, HW queues=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) host->nr_hw_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) ql_dbg(ql_dbg_init, base_vha, 0x0193,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) "blk/scsi-mq disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) qlt_probe_one_stage1(base_vha, ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
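^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) /* Save the PCI config space so it can be restored after chip resets. */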
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) /* Assign back pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) rsp->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) req->rsp = rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) if (IS_QLAFX00(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) ha->rsp_q_map[0] = rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) ha->req_q_map[0] = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) set_bit(0, ha->req_qid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) set_bit(0, ha->rsp_qid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) /* FWI2-capable only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) req->req_q_in = &ha->iobase->isp24.req_q_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) req->req_q_out = &ha->iobase->isp24.req_q_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) IS_QLA28XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) if (IS_QLAFX00(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) req->req_q_in = &ha->iobase->ispfx00.req_q_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) req->req_q_out = &ha->iobase->ispfx00.req_q_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (IS_P3P_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) req->req_q_out = &ha->iobase->isp82.req_q_out[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) "req->req_q_in=%p req->req_q_out=%p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) req->req_q_in, req->req_q_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) rsp->rsp_q_in, rsp->rsp_q_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) ql_dbg(ql_dbg_init, base_vha, 0x003e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) ql_dbg(ql_dbg_init, base_vha, 0x003f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) if (unlikely(!ha->wq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) goto probe_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) if (ha->isp_ops->initialize_adapter(base_vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) ql_log(ql_log_fatal, base_vha, 0x00d6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) "Failed to initialize adapter - Adapter flags %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) base_vha->device_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) if (IS_QLA82XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) qla82xx_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) QLA8XXX_DEV_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) qla82xx_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) ql_log(ql_log_fatal, base_vha, 0x00d7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) "HW State: FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) } else if (IS_QLA8044(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) qla8044_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) qla8044_wr_direct(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) QLA8044_CRB_DEV_STATE_INDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) QLA8XXX_DEV_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) qla8044_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) ql_log(ql_log_fatal, base_vha, 0x0150,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) "HW State: FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) goto probe_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) if (IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) host->can_queue = QLAFX00_MAX_CANQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) host->can_queue = req->num_outstanding_cmds - 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) ql_dbg(ql_dbg_init, base_vha, 0x0032,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) host->can_queue, base_vha->req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) base_vha->mgmt_svr_loop_id, host->sg_tablesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)
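^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) * Decide whether the start-of-day qpairs should be started right away:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) * hold off when target mode is enabled, start immediately when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) * driver comes up in pure initiator mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) */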
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) if (ha->mqenable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) bool startit = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) if (QLA_TGT_MODE_ENABLED())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) startit = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) startit = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) /* Create start of day qpairs for Block MQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) for (i = 0; i < ha->max_qpairs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) qla2xxx_create_qpair(base_vha, 5, 0, startit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) }
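^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) /* Set up firmware IOCB resource limits for this adapter. */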
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) qla_init_iocb_limit(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) if (ha->flags.running_gold_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) goto skip_dpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) * Start the kernel DPC thread for this host adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) "%s_dpc", base_vha->host_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) if (IS_ERR(ha->dpc_thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) ql_log(ql_log_fatal, base_vha, 0x00ed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) "Failed to start DPC thread.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) ret = PTR_ERR(ha->dpc_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) ha->dpc_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) goto probe_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) ql_dbg(ql_dbg_init, base_vha, 0x00ee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) "DPC thread started successfully.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) * If we're not coming up in initiator mode, we might sit for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) * a while without waking up the dpc thread, which leads to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) * stuck process warning. So just kick the dpc once here and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) * let the kthread start (and go back to sleep in qla2x00_do_dpc).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) qla2xxx_wake_dpc(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) INIT_WORK(&ha->idc_state_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) qla83xx_idc_state_handler_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) INIT_WORK(&ha->nic_core_unrecoverable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) qla83xx_nic_core_unrecoverable_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) skip_dpc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) list_add_tail(&base_vha->list, &ha->vp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) base_vha->host->irq = ha->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) /* Initialize the timer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) qla2x00_start_timer(base_vha, WATCH_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) ql_dbg(ql_dbg_init, base_vha, 0x00ef,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) "Started qla2x00_timer with interval=%d.\n", WATCH_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) ql_dbg(ql_dbg_init, base_vha, 0x00f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) "Detected hba at address=%p.\n", ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357)
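^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * Advertise T10-PI (DIF/DIX) protection and guard types to the SCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * midlayer when the HBA is protection capable, ql2xenabledif is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * and the firmware reports support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) */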
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (ha->fw_attributes & BIT_4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) int prot = 0, guard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) base_vha->flags.difdix_supported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) ql_dbg(ql_dbg_init, base_vha, 0x00f1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) "Registering for DIF/DIX type 1 and 3 protection.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) if (ql2xenabledif == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) prot = SHOST_DIX_TYPE0_PROTECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) if (ql2xprotmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) scsi_host_set_prot(host, ql2xprotmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) scsi_host_set_prot(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) prot | SHOST_DIF_TYPE1_PROTECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) | SHOST_DIF_TYPE2_PROTECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) | SHOST_DIF_TYPE3_PROTECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) | SHOST_DIX_TYPE1_PROTECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) | SHOST_DIX_TYPE2_PROTECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) | SHOST_DIX_TYPE3_PROTECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) guard = SHOST_DIX_GUARD_CRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) if (IS_PI_IPGUARD_CAPABLE(ha) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) guard |= SHOST_DIX_GUARD_IP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) if (ql2xprotguard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) scsi_host_set_guard(host, ql2xprotguard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) scsi_host_set_guard(host, guard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) base_vha->flags.difdix_supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) ha->isp_ops->enable_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if (IS_QLAFX00(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) ret = qlafx00_fx_disc(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) host->sg_tablesize = (ha->mr.extended_io_enabled) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) QLA_SG_ALL : 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) ret = scsi_add_host(host, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) goto probe_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) base_vha->flags.init_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) base_vha->flags.online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) ha->prev_minidump_failed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) ql_dbg(ql_dbg_init, base_vha, 0x00f2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) "Init done and hba is online.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) if (qla_ini_mode_enabled(base_vha) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) qla_dual_mode_enabled(base_vha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) scsi_scan_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) ql_dbg(ql_dbg_init, base_vha, 0x0122,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) "skipping scsi_scan_host() for non-initiator port\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) qla2x00_alloc_sysfs_attr(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) if (IS_QLAFX00(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) ret = qlafx00_fx_disc(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) /* Register system information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) ret = qlafx00_fx_disc(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) qla2x00_init_host_attr(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) qla2x00_dfs_setup(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) ql_log(ql_log_info, base_vha, 0x00fb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) ql_log(ql_log_info, base_vha, 0x00fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) sizeof(pci_info)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) base_vha->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) qlt_add_target(ha, base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)
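^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) /* An unload may have raced with this probe; bail out if so. */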
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) if (test_bit(UNLOADING, &base_vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)
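^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) /* Error unwind: release resources in roughly the reverse order of setup. */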
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) probe_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) if (base_vha->gnl.l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) base_vha->gnl.l, base_vha->gnl.ldma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) base_vha->gnl.l = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) if (base_vha->timer_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) qla2x00_stop_timer(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) base_vha->flags.online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (ha->dpc_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) struct task_struct *t = ha->dpc_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) ha->dpc_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) kthread_stop(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) qla2x00_free_device(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) scsi_host_put(base_vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) * Need to NULL out local req/rsp after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) * qla2x00_free_device => qla2x00_free_queues frees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) * what these are pointing to. Or else we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) * fall over below in qla2x00_free_req/rsp_que.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) rsp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) probe_hw_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) qla2x00_mem_free(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) qla2x00_free_req_que(ha, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) qla2x00_free_rsp_que(ha, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) qla2x00_clear_drv_active(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) iospace_config_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (IS_P3P_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) if (ha->nx_pcibase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) iounmap((device_reg_t *)ha->nx_pcibase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (!ql2xdbwr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) iounmap((device_reg_t *)ha->nxdb_wr_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (ha->iobase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) iounmap(ha->iobase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (ha->cregbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) iounmap(ha->cregbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) pci_release_selected_regions(ha->pdev, ha->bars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) kfree(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) disable_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) scsi_qla_host_t *vp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) struct qla_hw_data *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) if (!base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) spin_lock_irqsave(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) list_for_each_entry(vp, &ha->vp_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) * Indicate device removal to prevent future board_disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) * and wait until any pending board_disable has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) spin_unlock_irqrestore(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) qla2x00_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) scsi_qla_host_t *vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) struct qla_hw_data *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) ql_log(ql_log_info, vha, 0xfffa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) "Adapter shutdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) * Prevent future board_disable and wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) * until any pending board_disable has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) __qla_set_remove_flag(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) cancel_work_sync(&ha->board_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
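^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) /* Nothing more to do if the PCI device has already been disabled. */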
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) if (!atomic_read(&pdev->enable_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) /* Notify ISPFX00 firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) if (IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) qlafx00_driver_shutdown(vha, 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) /* Turn-off FCE trace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) if (ha->flags.fce_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) qla2x00_disable_fce_trace(vha, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) ha->flags.fce_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) /* Turn-off EFT trace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) if (ha->eft)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) qla2x00_disable_eft_trace(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) IS_QLA28XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) if (ha->flags.fw_started)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) qla2x00_abort_isp_cleanup(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) /* Stop currently executing firmware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) qla2x00_try_to_stop_firmware(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) /* Disable timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (vha->timer_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) qla2x00_stop_timer(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) /* Take the adapter offline. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) vha->flags.online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) /* turn-off interrupts on the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) if (ha->interrupts_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) vha->flags.init_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) ha->isp_ops->disable_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) qla2x00_free_irqs(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) qla2x00_free_fw_dump(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) ql_log(ql_log_info, vha, 0xfffe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) "Adapter shutdown successfully.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) /* Deletes all the virtual ports for a given ha */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) scsi_qla_host_t *vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) mutex_lock(&ha->vport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) while (ha->cur_vport_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) spin_lock_irqsave(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) BUG_ON(base_vha->list.next == &ha->vp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) /* This assumes first entry in ha->vp_list is always base vha */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) scsi_host_get(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
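^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) * Drop the locks before fc_vport_terminate(), which can sleep; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) * scsi_host_get() above keeps the vport alive until we are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) */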
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) spin_unlock_irqrestore(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) mutex_unlock(&ha->vport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) qla_nvme_delete(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) fc_vport_terminate(vha->fc_vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) scsi_host_put(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) mutex_lock(&ha->vport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) mutex_unlock(&ha->vport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) /* Stops all deferred work threads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) /* Cancel all work and destroy DPC workqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) if (ha->dpc_lp_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) cancel_work_sync(&ha->idc_aen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) destroy_workqueue(ha->dpc_lp_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) ha->dpc_lp_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) if (ha->dpc_hp_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) cancel_work_sync(&ha->nic_core_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) cancel_work_sync(&ha->idc_state_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) cancel_work_sync(&ha->nic_core_unrecoverable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) destroy_workqueue(ha->dpc_hp_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) ha->dpc_hp_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) /* Kill the kernel thread for this host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) if (ha->dpc_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) struct task_struct *t = ha->dpc_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) * qla2xxx_wake_dpc checks for ->dpc_thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) * so we need to zero it out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) ha->dpc_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) kthread_stop(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) qla2x00_unmap_iobases(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) if (IS_QLA82XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) iounmap((device_reg_t *)ha->nx_pcibase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) if (!ql2xdbwr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) iounmap((device_reg_t *)ha->nxdb_wr_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) if (ha->iobase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) iounmap(ha->iobase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (ha->cregbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) iounmap(ha->cregbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) if (ha->mqiobase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) iounmap(ha->mqiobase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (ha->msixbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) iounmap(ha->msixbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) qla2x00_clear_drv_active(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) if (IS_QLA8044(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) qla8044_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) qla8044_clear_drv_active(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) qla8044_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) } else if (IS_QLA82XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) qla82xx_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) qla82xx_clear_drv_active(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) qla82xx_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) qla2x00_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) scsi_qla_host_t *base_vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) struct qla_hw_data *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) base_vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) ql_log(ql_log_info, base_vha, 0xb079,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) "Removing driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) __qla_set_remove_flag(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) cancel_work_sync(&ha->board_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) * If the PCI device is disabled then there was a PCI-disconnect and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) * qla2x00_disable_board_on_pci_error has taken care of most of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) * resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) if (!atomic_read(&pdev->enable_cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) base_vha->gnl.l, base_vha->gnl.ldma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) base_vha->gnl.l = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) scsi_host_put(base_vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) kfree(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) pci_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) qla2x00_wait_for_hba_ready(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) * If the UNLOADING flag is already set, another path is tearing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) * adapter down; bail out here and let that path finish the unload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) IS_QLA28XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) if (ha->flags.fw_started)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) qla2x00_abort_isp_cleanup(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) } else if (!IS_QLAFX00(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) if (IS_QLA8031(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) "Clearing fcoe driver presence.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) "Error while clearing DRV-Presence.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) qla2x00_try_to_stop_firmware(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) qla2x00_wait_for_sess_deletion(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) qla_nvme_delete(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) dma_free_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) base_vha->gnl.l = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) vfree(base_vha->scan.l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) if (IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) qlafx00_driver_shutdown(base_vha, 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) qla2x00_delete_all_vps(ha, base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) qla2x00_dfs_remove(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) qla84xx_put_chip(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) /* Disable timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) if (base_vha->timer_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) qla2x00_stop_timer(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) base_vha->flags.online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) /* Free the extended login (exlogin) DMA buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) if (ha->exlogin_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) qla2x00_free_exlogin_buffer(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /* Free the exchange offload DMA buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) if (ha->exchoffld_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) qla2x00_free_exchoffld_buffer(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) qla2x00_destroy_deferred_work(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) qlt_remove_target(ha, base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) qla2x00_free_sysfs_attr(base_vha, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) fc_remove_host(base_vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) qlt_remove_target_resources(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) scsi_remove_host(base_vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) qla2x00_free_device(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) qla2x00_clear_drv_active(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) scsi_host_put(base_vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) qla2x00_unmap_iobases(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) pci_release_selected_regions(ha->pdev, ha->bars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) kfree(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) pci_disable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) qla24xx_free_purex_list(struct purex_list *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) struct list_head *item, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) ulong flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
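^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) /* Drain the list under its lock and free every queued PUREX item. */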
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) spin_lock_irqsave(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) list_for_each_safe(item, next, &list->head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) list_del(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) kfree(list_entry(item, struct purex_item, list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) spin_unlock_irqrestore(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) qla2x00_free_device(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) /* Disable timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) if (vha->timer_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) qla2x00_stop_timer(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) qla25xx_delete_queues(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) vha->flags.online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) /* turn-off interrupts on the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) if (ha->interrupts_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) vha->flags.init_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) ha->isp_ops->disable_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) qla2x00_free_fcports(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) qla2x00_free_irqs(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) /* Flush the work queue and remove it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) if (ha->wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) flush_workqueue(ha->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) destroy_workqueue(ha->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) ha->wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) qla24xx_free_purex_list(&vha->purex_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) qla2x00_mem_free(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) qla82xx_md_free(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) qla2x00_free_queues(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) void qla2x00_free_fcports(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) fc_port_t *fcport, *tfcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) qla2x00_free_fcport(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
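/*
 * Tear down the fc_rport attached to @fcport (if one exists) and note a
 * target-mode generation tick so later discovery work can tell that the
 * remote port state changed.
 */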
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) int now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) if (!fcport->rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) "%s %8phN. rport %p roles %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) __func__, fcport->port_name, fcport->rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) fcport->rport->roles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) fc_remote_port_delete(fcport->rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) qlt_do_generation_tick(vha, &now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) * qla2x00_mark_device_lost - update fcport state when a device goes offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) * Input: vha = adapter block pointer. fcport = port structure pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) *        do_login = set to schedule a relogin attempt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) * Return: None.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) int do_login)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) if (IS_QLAFX00(vha->hw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) qla2x00_schedule_rport_del(vha, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) if (atomic_read(&fcport->state) == FCS_ONLINE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) vha->vp_idx == fcport->vha->vp_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) qla2x00_schedule_rport_del(vha, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) * We may need to retry the login, so don't change the state of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) * port but do the retries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) if (!do_login)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) fc_port_t *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) ql_dbg(ql_dbg_disc, vha, 0x20f1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) "Mark all dev lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) list_for_each_entry(fcport, &vha->vp_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) if (fcport->loop_id != FC_NO_LOOP_ID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) (fcport->flags & FCF_FCP2_DEVICE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) fcport->port_type == FCT_TARGET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) !qla2x00_reset_active(vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) ql_dbg(ql_dbg_disc, vha, 0x211a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) fcport->flags, fcport->port_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) fcport->d_id.b24, fcport->port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) fcport->scan_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) qlt_schedule_sess_for_deletion(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
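/*
 * On pre-FWI2 (ISP2xxx-style) adapters, mark the loop IDs reserved for the
 * fabric (everything below SNS_FIRST_LOOP_ID plus the management server and
 * broadcast IDs) as in-use so they are never handed out to an fcport.
 */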
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) if (IS_FWI2_CAPABLE(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) set_bit(i, ha->loop_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) set_bit(BROADCAST, ha->loop_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) * qla2x00_mem_alloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) * Allocates adapter memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) * 0 = success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) * !0 = failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) struct req_que **req, struct rsp_que **rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) char name[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) &ha->init_cb_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) if (!ha->init_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (qlt_mem_alloc(ha) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) goto fail_free_init_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) if (!ha->gid_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) goto fail_free_tgt_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) if (!ha->srb_mempool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) goto fail_free_gid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) if (IS_P3P_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) /* Allocate cache for CT6 Ctx. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) if (!ctx_cachep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) ctx_cachep = kmem_cache_create("qla2xxx_ctx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) sizeof(struct ct6_dsd), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) if (!ctx_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) goto fail_free_srb_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) ctx_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) if (!ha->ctx_mempool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) goto fail_free_srb_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) "ctx_cachep=%p ctx_mempool=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) ctx_cachep, ha->ctx_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) /* Get memory for cached NVRAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) if (!ha->nvram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) goto fail_free_ctx_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) ha->pdev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) DMA_POOL_SIZE, 8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) if (!ha->s_dma_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) goto fail_free_nvram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) if (IS_P3P_TYPE(ha) || ql2xenabledif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) DSD_LIST_DMA_POOL_SIZE, 8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) if (!ha->dl_dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) "Failed to allocate memory for dl_dma_pool.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) goto fail_s_dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) FCP_CMND_DMA_POOL_SIZE, 8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) if (!ha->fcp_cmnd_dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) goto fail_dl_dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) if (ql2xenabledif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) struct dsd_dma *dsd, *nxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) uint i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) /* Create a DMA pool of buffers for DIF bundling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) ha->dif_bundl_pool = dma_pool_create(name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) if (!ha->dif_bundl_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) "%s: failed create dif_bundl_pool\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) goto fail_dif_bundl_dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) INIT_LIST_HEAD(&ha->pool.good.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) INIT_LIST_HEAD(&ha->pool.unusable.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) ha->pool.good.count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) ha->pool.unusable.count = 0;
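/*
 * Pre-filter the DIF bundling pool: allocate a batch of buffers, park any
 * that straddle a 4GB boundary on the "unusable" list (keeping them
 * allocated so the pool cannot hand them out again), and return the
 * remaining good buffers to the pool below.
 */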
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) for (i = 0; i < 128; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) if (!dsd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) ql_dbg_pci(ql_dbg_init, ha->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 0xe0ee, "%s: failed alloc dsd\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) ha->dif_bundle_kallocs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) dsd->dsd_addr = dma_pool_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) ha->dif_bundl_pool, GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) &dsd->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) if (!dsd->dsd_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) ql_dbg_pci(ql_dbg_init, ha->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 0xe0ee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) "%s: failed alloc ->dsd_addr\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) kfree(dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) ha->dif_bundle_kallocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) ha->dif_bundle_dma_allocs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) * if DMA buffer crosses 4G boundary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) * put it on bad list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) if (MSD(dsd->dsd_list_dma) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) MSD(dsd->dsd_list_dma + bufsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) list_add_tail(&dsd->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) &ha->pool.unusable.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) ha->pool.unusable.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) list_add_tail(&dsd->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) &ha->pool.good.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) ha->pool.good.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) /* return the good ones back to the pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) list_for_each_entry_safe(dsd, nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) &ha->pool.good.head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) list_del(&dsd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) dma_pool_free(ha->dif_bundl_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) dsd->dsd_addr, dsd->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) ha->dif_bundle_dma_allocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) kfree(dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) ha->dif_bundle_kallocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) "%s: dif dma pool (good=%u unusable=%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) __func__, ha->pool.good.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) ha->pool.unusable.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) ha->dif_bundl_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) /* Allocate memory for SNS commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) /* Get consistent memory allocated for SNS commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) if (!ha->sns_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) goto fail_dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) "sns_cmd: %p.\n", ha->sns_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) /* Get consistent memory allocated for MS IOCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) &ha->ms_iocb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) if (!ha->ms_iocb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) goto fail_dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) /* Get consistent memory allocated for CT SNS commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) if (!ha->ct_sns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) goto fail_free_ms_iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) "ms_iocb=%p ct_sns=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) ha->ms_iocb, ha->ct_sns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) /* Allocate memory for request ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) if (!*req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) "Failed to allocate memory for req.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) goto fail_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) (*req)->length = req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) ((*req)->length + 1) * sizeof(request_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) &(*req)->dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) if (!(*req)->ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) "Failed to allocate memory for req_ring.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) goto fail_req_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) /* Allocate memory for response ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (!*rsp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) "Failed to allocate memory for rsp.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) goto fail_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) (*rsp)->hw = ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) (*rsp)->length = rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) ((*rsp)->length + 1) * sizeof(response_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) &(*rsp)->dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) if (!(*rsp)->ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) "Failed to allocate memory for rsp_ring.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) goto fail_rsp_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) (*req)->rsp = *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) (*rsp)->req = *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) "req=%p req->length=%d req->ring=%p rsp=%p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) "rsp->length=%d rsp->ring=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) (*rsp)->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) /* Allocate memory for NVRAM data for vports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) if (ha->nvram_npiv_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) ha->npiv_info = kcalloc(ha->nvram_npiv_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) sizeof(struct qla_npiv_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) if (!ha->npiv_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) "Failed to allocate memory for npiv_info.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) goto fail_npiv_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) ha->npiv_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) /* Get consistent memory allocated for EX-INIT-CB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) IS_QLA28XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) &ha->ex_init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) if (!ha->ex_init_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) goto fail_ex_init_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) "ex_init_cb=%p.\n", ha->ex_init_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) /* Get consistent memory allocated for Special Features-CB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) ha->sf_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) &ha->sf_init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) if (!ha->sf_init_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) goto fail_sf_init_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) "sf_init_cb=%p.\n", ha->sf_init_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) INIT_LIST_HEAD(&ha->gbl_dsd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) /* Get consistent memory allocated for Async Port-Database. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) if (!IS_FWI2_CAPABLE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) &ha->async_pd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) if (!ha->async_pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) goto fail_async_pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) "async_pd=%p.\n", ha->async_pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) INIT_LIST_HEAD(&ha->vp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) /* Allocate memory for our loop_id bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) if (!ha->loop_id_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) goto fail_loop_id_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) qla2x00_set_reserved_loop_ids(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) "loop_id_map=%p.\n", ha->loop_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) if (!ha->sfp_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) "Unable to allocate memory for SFP read-data.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) goto fail_sfp_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) ha->flt = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) if (!ha->flt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) "Unable to allocate memory for FLT.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) goto fail_flt_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
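/*
 * Error unwind: the labels below run in reverse order of allocation, so
 * each step releases only the resources that were successfully set up
 * before the failure point.
 */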
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) fail_flt_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) ha->sfp_data, ha->sfp_data_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) fail_sfp_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) kfree(ha->loop_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) fail_loop_id_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) fail_async_pd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) fail_sf_init_cb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) fail_ex_init_cb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) kfree(ha->npiv_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) fail_npiv_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) (*rsp)->ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) (*rsp)->dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) fail_rsp_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) kfree(*rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) *rsp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) fail_rsp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) sizeof(request_t), (*req)->ring, (*req)->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) (*req)->ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) (*req)->dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) fail_req_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) kfree(*req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) fail_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) ha->ct_sns, ha->ct_sns_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) ha->ct_sns = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) ha->ct_sns_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) fail_free_ms_iocb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) ha->ms_iocb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) ha->ms_iocb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) if (ha->sns_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) ha->sns_cmd, ha->sns_cmd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) fail_dma_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) if (ql2xenabledif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) struct dsd_dma *dsd, *nxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) list_del(&dsd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) dsd->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) ha->dif_bundle_dma_allocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) kfree(dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) ha->dif_bundle_kallocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) ha->pool.unusable.count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) dma_pool_destroy(ha->dif_bundl_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) ha->dif_bundl_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) fail_dif_bundl_dma_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (IS_QLA82XX(ha) || ql2xenabledif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) dma_pool_destroy(ha->fcp_cmnd_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) ha->fcp_cmnd_dma_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) fail_dl_dma_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (IS_QLA82XX(ha) || ql2xenabledif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) dma_pool_destroy(ha->dl_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) ha->dl_dma_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) fail_s_dma_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) dma_pool_destroy(ha->s_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) ha->s_dma_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) fail_free_nvram:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) kfree(ha->nvram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) ha->nvram = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) fail_free_ctx_mempool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) mempool_destroy(ha->ctx_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) ha->ctx_mempool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) fail_free_srb_mempool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) mempool_destroy(ha->srb_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) ha->srb_mempool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) fail_free_gid_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) ha->gid_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) ha->gid_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) ha->gid_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) ha->gid_list_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) fail_free_tgt_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) qlt_mem_free(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) fail_free_init_cb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) ha->init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) ha->init_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) ha->init_cb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) ql_log(ql_log_fatal, NULL, 0x0030,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) "Memory allocation failure.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
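/*
 * Allocate the extended-login (exlogin) offload buffer and register it with
 * the firmware. Only done when ql2xexlogins asks for more logins than the
 * default port database supports and the adapter can offload them.
 */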
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) uint16_t size, max_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) uint32_t temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) /* Return early if we don't need to allocate any extended logins */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) max_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) rval = qla_get_exlogin_status(vha, &size, &max_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) "Failed to get exlogin status.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) temp *= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) if (temp != ha->exlogin_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) qla2x00_free_exlogin_buffer(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) ha->exlogin_size = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) ql_log(ql_log_info, vha, 0xd024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) max_cnt, size, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) ql_log(ql_log_info, vha, 0xd025,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) /* Get consistent memory for extended logins */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) if (!ha->exlogin_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) "Failed to allocate memory for exlogin_buf_dma.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) /* Now configure the dma buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) ql_log(ql_log_fatal, vha, 0xd033,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) "Setup extended login buffer ****FAILED****.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) qla2x00_free_exlogin_buffer(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) * qla2x00_free_exlogin_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) * ha = adapter block pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) if (ha->exlogin_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) ha->exlogin_buf, ha->exlogin_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) ha->exlogin_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) ha->exlogin_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446)
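/*
 * Work out how many FC exchanges to ask the firmware to offload, clamping
 * the user-requested initiator/target counts to the adapter limit reported
 * in max_cnt.
 */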
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) *ret_cnt = FW_DEF_EXCHANGES_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) if (max_cnt > vha->hw->max_exchg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) max_cnt = vha->hw->max_exchg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) if (qla_ini_mode_enabled(vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) if (vha->ql2xiniexchg > max_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) vha->ql2xiniexchg = max_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) *ret_cnt = vha->ql2xiniexchg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) } else if (qla_tgt_mode_enabled(vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) if (vha->ql2xexchoffld > max_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) vha->ql2xexchoffld = max_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) *ret_cnt = vha->ql2xexchoffld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) } else if (qla_dual_mode_enabled(vha)) {
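/*
 * Dual mode: initiator and target exchanges share the firmware limit,
 * so trim the overflow roughly in half from each side.
 */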
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) if (temp > max_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) vha->ql2xiniexchg -= (temp - max_cnt)/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) temp = max_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) if (temp > FW_DEF_EXCHANGES_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) *ret_cnt = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
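/*
 * Size, allocate and register the exchange-offload buffer with the firmware,
 * reducing ha->max_exchg so a later attempt asks for less (or disabling
 * offload entirely) when the allocation fails.
 */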
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) u16 size, max_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) u32 actual_cnt, totsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) if (!ha->flags.exchoffld_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) if (!IS_EXCHG_OFFLD_CAPABLE(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) max_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) "Failed to get exchange offload status.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) ql_log(ql_log_info, vha, 0xd014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) "Actual exchange offload count: %d.\n", actual_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) totsz = actual_cnt * size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) if (totsz != ha->exchoffld_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) qla2x00_free_exchoffld_buffer(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) ha->exchoffld_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) ha->flags.exchoffld_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) ha->exchoffld_size = totsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) ql_log(ql_log_info, vha, 0xd016,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) max_cnt, actual_cnt, size, totsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) ql_log(ql_log_info, vha, 0xd017,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) "Exchange Buffers requested size = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) ha->exchoffld_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) /* Get consistent memory for exchange offload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) if (!ha->exchoffld_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) "Failed to allocate memory for Exchange Offload.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) if (ha->max_exchg >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) ha->max_exchg -= REDUCE_EXCHANGES_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) } else if (ha->max_exchg >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) (FW_DEF_EXCHANGES_CNT + 512)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) ha->max_exchg -= 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) ha->flags.exchoffld_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) "Disabling Exchange offload due to lack of memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) ha->exchoffld_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) /* pathological case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) qla2x00_free_exchoffld_buffer(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) ha->exchoffld_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) ha->flags.exchoffld_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) ql_log(ql_log_info, vha, 0xd016,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) "Exchange offload not enabled: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) ha->exchoffld_size, actual_cnt, size, totsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) /* Now configure the dma buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) rval = qla_set_exchoffld_mem_cfg(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) ql_log(ql_log_fatal, vha, 0xd02e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) "Setup exchange offload buffer ****FAILED****.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) qla2x00_free_exchoffld_buffer(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) /* re-adjust number of target exchange */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) if (qla_ini_mode_enabled(vha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) icb->exchange_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) * qla2x00_free_exchoffld_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) * ha = adapter block pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) if (ha->exchoffld_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) ha->exchoffld_buf, ha->exchoffld_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) ha->exchoffld_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) ha->exchoffld_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) * qla2x00_free_fw_dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) * Frees fw dump stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) * ha = adapter block pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) qla2x00_free_fw_dump(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) struct fwdt *fwdt = ha->fwdt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) uint j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) if (ha->fce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) dma_free_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) FCE_SIZE, ha->fce, ha->fce_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) if (ha->eft)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) dma_free_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) EFT_SIZE, ha->eft, ha->eft_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) vfree(ha->fw_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) ha->fce = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) ha->fce_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) ha->flags.fce_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) ha->eft = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) ha->eft_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) ha->fw_dumped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) ha->fw_dump_cap_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) ha->fw_dump_reading = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) ha->fw_dump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) ha->fw_dump_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) for (j = 0; j < 2; j++, fwdt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) vfree(fwdt->template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) fwdt->template = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) fwdt->length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) * qla2x00_mem_free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) * Frees all adapter allocated memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) * ha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) qla2x00_mem_free(struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) qla2x00_free_fw_dump(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) if (ha->mctp_dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) ha->mctp_dump_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) ha->mctp_dump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) mempool_destroy(ha->srb_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) ha->srb_mempool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) if (ha->dcbx_tlv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) ha->dcbx_tlv, ha->dcbx_tlv_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) ha->dcbx_tlv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) if (ha->xgmac_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) ha->xgmac_data, ha->xgmac_data_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) ha->xgmac_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) if (ha->sns_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) ha->sns_cmd, ha->sns_cmd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) ha->sns_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) ha->sns_cmd_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) if (ha->ct_sns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) ha->ct_sns, ha->ct_sns_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) ha->ct_sns = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) ha->ct_sns_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) if (ha->sfp_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) ha->sfp_data_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) ha->sfp_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) if (ha->flt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) dma_free_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) ha->flt, ha->flt_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) ha->flt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) ha->flt_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) if (ha->ms_iocb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) ha->ms_iocb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) ha->ms_iocb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) if (ha->sf_init_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) dma_pool_free(ha->s_dma_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) ha->sf_init_cb, ha->sf_init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) if (ha->ex_init_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) dma_pool_free(ha->s_dma_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) ha->ex_init_cb, ha->ex_init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) ha->ex_init_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) ha->ex_init_cb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) if (ha->async_pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) ha->async_pd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) ha->async_pd_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) dma_pool_destroy(ha->s_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) ha->s_dma_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) if (ha->gid_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) ha->gid_list, ha->gid_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) ha->gid_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) ha->gid_list_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) if (IS_QLA82XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) if (!list_empty(&ha->gbl_dsd_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) struct dsd_dma *dsd_ptr, *tdsd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) /* clean up allocated prev pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) list_for_each_entry_safe(dsd_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) tdsd_ptr, &ha->gbl_dsd_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) dma_pool_free(ha->dl_dma_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) list_del(&dsd_ptr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) kfree(dsd_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) dma_pool_destroy(ha->dl_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) ha->dl_dma_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) dma_pool_destroy(ha->fcp_cmnd_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) ha->fcp_cmnd_dma_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) mempool_destroy(ha->ctx_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) ha->ctx_mempool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) if (ql2xenabledif && ha->dif_bundl_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) struct dsd_dma *dsd, *nxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) list_del(&dsd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) dsd->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) ha->dif_bundle_dma_allocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) kfree(dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) ha->dif_bundle_kallocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) ha->pool.unusable.count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) list_del(&dsd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) dsd->dsd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) ha->dif_bundle_dma_allocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) kfree(dsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) ha->dif_bundle_kallocs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) dma_pool_destroy(ha->dif_bundl_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) ha->dif_bundl_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) qlt_mem_free(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) if (ha->init_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) ha->init_cb, ha->init_cb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) ha->init_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) ha->init_cb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) vfree(ha->optrom_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) ha->optrom_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) kfree(ha->nvram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) ha->nvram = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) kfree(ha->npiv_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) ha->npiv_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) kfree(ha->swl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) ha->swl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) kfree(ha->loop_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) ha->sf_init_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) ha->sf_init_cb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) ha->loop_id_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798)
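/*
 * qla2x00_create_host
 * Allocates a Scsi_Host with driver-private scsi_qla_host data and
 * initializes its lists, locks and work items.  The DMA-coherent
 * extended name list (gnl.l) and the fabric scan database (scan.l)
 * are allocated here as well; on any failure the host is released
 * and NULL is returned.
 */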
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) struct qla_hw_data *ha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) struct Scsi_Host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) struct scsi_qla_host *vha = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) "Failed to allocate host from the scsi layer, aborting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) /* Clear our data area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) vha = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) memset(vha, 0, sizeof(scsi_qla_host_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) vha->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) vha->host_no = host->host_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) vha->hw = ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) vha->qlini_mode = ql2x_ini_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) vha->ql2xexchoffld = ql2xexchoffld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) vha->ql2xiniexchg = ql2xiniexchg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) INIT_LIST_HEAD(&vha->vp_fcports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) INIT_LIST_HEAD(&vha->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) INIT_LIST_HEAD(&vha->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) INIT_LIST_HEAD(&vha->qla_cmd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) INIT_LIST_HEAD(&vha->logo_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) INIT_LIST_HEAD(&vha->plogi_ack_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) INIT_LIST_HEAD(&vha->qp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) INIT_LIST_HEAD(&vha->gnl.fcports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) INIT_LIST_HEAD(&vha->gpnid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) INIT_LIST_HEAD(&vha->purex_list.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) spin_lock_init(&vha->purex_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) spin_lock_init(&vha->work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) spin_lock_init(&vha->cmd_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) init_waitqueue_head(&vha->fcport_waitQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) init_waitqueue_head(&vha->vref_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) vha->gnl.size = sizeof(struct get_name_list_extended) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) (ha->max_loop_id + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) if (!vha->gnl.l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) ql_log(ql_log_fatal, vha, 0xd04a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) "Alloc failed for name list.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) scsi_host_put(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) /* todo: what about ext login? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) vha->scan.l = vmalloc(vha->scan.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) if (!vha->scan.l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) ql_log(ql_log_fatal, vha, 0xd04a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) "Alloc failed for scan database.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) vha->gnl.l, vha->gnl.ldma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) vha->gnl.l = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) scsi_host_put(vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) vha->host, vha->hw, vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) dev_name(&(ha->pdev->dev)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) return vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877)
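/*
 * qla2x00_alloc_work
 * Allocates a work event of the given type for the deferred work list.
 * Returns NULL if the host is unloading or the vha busy reference cannot
 * be taken; that reference is dropped again when the event is processed
 * in qla2x00_do_work().
 */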
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) struct qla_work_evt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) struct qla_work_evt *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) uint8_t bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) if (test_bit(UNLOADING, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) QLA_VHA_MARK_BUSY(vha, bail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) if (bail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) QLA_VHA_MARK_NOT_BUSY(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) INIT_LIST_HEAD(&e->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) e->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) e->flags = QLA_EVT_FLAG_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) return e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902)
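/*
 * qla2x00_post_work
 * Adds a previously allocated work event to the vha work list and, if no
 * IOCB work is already pending, queues iocb_work on the hw workqueue.
 */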
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) bool q = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) spin_lock_irqsave(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) list_add_tail(&e->list, &vha->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) q = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) spin_unlock_irqrestore(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) if (q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) queue_work(vha->hw->wq, &vha->iocb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922)
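/*
 * The qla2x00_post_*_work() helpers below all follow the same pattern:
 * allocate a typed work event, fill in its payload and post it to the
 * work list for qla2x00_do_work() to dispatch.
 */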
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) struct qla_work_evt *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) e->u.aen.code = code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) e->u.aen.data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) return qla2x00_post_work(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) struct qla_work_evt *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) return qla2x00_post_work(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950)
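/*
 * Generates qla2x00_post_async_<name>_work() helpers that queue an
 * asynchronous login/logout/ADISC/PRLO event for a remote port and mark
 * the fcport with FCF_ASYNC_ACTIVE.
 */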
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) #define qla2x00_post_async_work(name, type) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) int qla2x00_post_async_##name##_work( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) struct scsi_qla_host *vha, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) fc_port_t *fcport, uint16_t *data) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) struct qla_work_evt *e; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) e = qla2x00_alloc_work(vha, type); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) if (!e) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) return QLA_FUNCTION_FAILED; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) e->u.logio.fcport = fcport; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) if (data) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) e->u.logio.data[0] = data[0]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) e->u.logio.data[1] = data[1]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) fcport->flags |= FCF_ASYNC_ACTIVE; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) return qla2x00_post_work(vha, e); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) struct qla_work_evt *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) e->u.uevent.code = code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) return qla2x00_post_work(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989)
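/*
 * Emits a KOBJ_CHANGE uevent on the PCI device for the given code; only
 * QLA_UEVENT_CODE_FW_DUMP currently adds a FW_DUMP=<host_no> environment
 * string.
 */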
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) char event_string[40];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) char *envp[] = { event_string, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) switch (code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) case QLA_UEVENT_CODE_FW_DUMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) vha->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007)
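/*
 * Posts an asynchronous event from the ISPFX00 path, copying "cnt"
 * mailbox words into the work event for qlafx00_process_aen() to handle.
 */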
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) uint32_t *data, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) struct qla_work_evt *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) if (!e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) e->u.aenfx.evtcode = evtcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) e->u.aenfx.count = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) return qla2x00_post_work(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023)
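/*
 * Schedules fcport registration work unless the port uses a switch
 * reserved address or an update is already in progress; the discovery
 * state moves to DSC_UPD_FCPORT under the work lock before reg_work is
 * queued on the system unbound workqueue.
 */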
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) void qla24xx_sched_upd_fcport(fc_port_t *fcport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) if (IS_SW_RESV_ADDR(fcport->d_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) spin_lock_irqsave(&fcport->vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) if (fcport->disc_state == DSC_UPD_FCPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) fcport->jiffies_at_registration = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) fcport->sec_since_registration = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) fcport->next_disc_state = DSC_DELETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) queue_work(system_unbound_wq, &fcport->reg_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044)
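/*
 * qla24xx_create_new_sess
 * Handles a QLA_EVT_NEW_SESS work event: reuses an existing fcport with
 * the same WWPN or allocates a new one, resolves N_Port ID conflicts,
 * links any pending PLOGI ACK, and then either starts discovery through
 * qla24xx_fcport_handle_login() or, in N2N topology, schedules a scan.
 */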
static void
qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) fc_port_t *fcport = NULL, *tfcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) struct qlt_plogi_ack_t *pla =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) uint8_t free_fcport = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) ql_dbg(ql_dbg_disc, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) "%s %d %8phC enter\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) __func__, __LINE__, e->u.new_sess.port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) if (fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) fcport->d_id = e->u.new_sess.id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) if (pla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) fcport->fw_login_state = DSC_LS_PLOGI_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) memcpy(fcport->node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) pla->iocb.u.isp24.u.plogi.node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) WWN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
/*
 * An extra ref_count was taken to hold off the PLOGI ACK until the
 * fcport/session exists; drop that reference now.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) pla->ref_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) if (fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) fcport->d_id = e->u.new_sess.id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) fcport->flags |= FCF_FABRIC_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) fcport->fw_login_state = DSC_LS_PLOGI_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) memcpy(fcport->port_name, e->u.new_sess.port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) WWN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) fcport->fc4_type = e->u.new_sess.fc4_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) fcport->dm_login_expire = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) QLA_N2N_WAIT_TIME * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) fcport->fc4_type = FS_FC4TYPE_FCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) fcport->n2n_flag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) if (vha->flags.nvme_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) fcport->fc4_type |= FS_FC4TYPE_NVME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) ql_dbg(ql_dbg_disc, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) "%s %8phC mem alloc fail.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) __func__, e->u.new_sess.port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) if (pla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) list_del(&pla->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) kmem_cache_free(qla_tgt_plogi_cachep, pla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) /* search again to make sure no one else got ahead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) tfcp = qla2x00_find_fcport_by_wwpn(vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) e->u.new_sess.port_name, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) if (tfcp) {
/* should rarely happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) ql_dbg(ql_dbg_disc, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) __func__, tfcp->port_name, tfcp->disc_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) tfcp->fw_login_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) free_fcport = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) list_add_tail(&fcport->list, &vha->vp_fcports);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) if (pla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) qlt_plogi_ack_link(vha, pla, fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) QLT_PLOGI_LINK_SAME_WWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) pla->ref_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) if (fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) fcport->id_changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) fcport->scan_state = QLA_FCPORT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) fcport->chip_reset = vha->hw->base_qpair->chip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) if (pla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) u16 wd3_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139)
fcport->fw_login_state = DSC_LS_PRLI_PEND;
fcport->local = 0;
fcport->loop_id = le16_to_cpu(pla->iocb.u.isp24.nport_handle);
wd3_lo = le16_to_cpu(pla->iocb.u.isp24.u.prli.wd3_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) if (wd3_lo & BIT_7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) fcport->conf_compl_supported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) if ((wd3_lo & BIT_4) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) fcport->port_type = FCT_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) fcport->port_type = FCT_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) qlt_plogi_ack_unref(vha, pla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) fc_port_t *dfcp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) tfcp = qla2x00_find_fcport_by_nportid(vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) &e->u.new_sess.id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) if (tfcp && (tfcp != fcport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) * We have a conflict fcport with same NportID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) ql_dbg(ql_dbg_disc, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) "%s %8phC found conflict b4 add. DS %d LS %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) __func__, tfcp->port_name, tfcp->disc_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) tfcp->fw_login_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) switch (tfcp->disc_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) case DSC_DELETED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) case DSC_DELETE_PEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) fcport->login_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) tfcp->conflict = fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) fcport->login_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) tfcp->conflict = fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) dfcp = tfcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) if (dfcp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) qlt_schedule_sess_for_deletion(tfcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) if (N2N_TOPO(vha->hw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) fcport->flags &= ~FCF_FABRIC_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) fcport->keep_nport_handle = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) if (vha->flags.nvme_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) fcport->fc4_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) fcport->n2n_flag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) fcport->fw_login_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) schedule_delayed_work(&vha->scan.scan_work, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) qla24xx_fcport_handle_login(vha, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) if (free_fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) qla2x00_free_fcport(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) if (pla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) list_del(&pla->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) kmem_cache_free(qla_tgt_plogi_cachep, pla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217)
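/*
 * Re-issues a previously prepared SRB; if qla2x00_start_sp() fails, the
 * SRB is cleaned up via qla24xx_sp_unmap().
 */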
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) struct srb *sp = e->u.iosb.sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) rval = qla2x00_start_sp(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) ql_dbg(ql_dbg_disc, vha, 0x2043,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) "%s: %s: Re-issue IOCB failed (%d).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) __func__, sp->name, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) qla24xx_sp_unmap(vha, sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231)
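/*
 * qla2x00_do_work
 * Drains the vha work list: splices it onto a local list under the work
 * lock and dispatches each event to its handler.  A handler returning
 * EAGAIN puts the remaining events back at the head of the list and stops
 * processing; the vha busy reference taken at allocation time is dropped
 * for every completed event.
 */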
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) qla2x00_do_work(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) struct qla_work_evt *e, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) LIST_HEAD(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) spin_lock_irqsave(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) list_splice_init(&vha->work_list, &work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) spin_unlock_irqrestore(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) list_for_each_entry_safe(e, tmp, &work, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) rc = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) switch (e->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) case QLA_EVT_AEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) fc_host_post_event(vha->host, fc_get_event_number(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) e->u.aen.code, e->u.aen.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) case QLA_EVT_IDC_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) qla81xx_idc_ack(vha, e->u.idc_ack.mb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) case QLA_EVT_ASYNC_LOGIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) qla2x00_async_login(vha, e->u.logio.fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) e->u.logio.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) case QLA_EVT_ASYNC_LOGOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) rc = qla2x00_async_logout(vha, e->u.logio.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) case QLA_EVT_ASYNC_ADISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) qla2x00_async_adisc(vha, e->u.logio.fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) e->u.logio.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) case QLA_EVT_UEVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) qla2x00_uevent_emit(vha, e->u.uevent.code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) case QLA_EVT_AENFX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) qlafx00_process_aen(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) case QLA_EVT_GPNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) qla24xx_async_gpnid(vha, &e->u.gpnid.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) case QLA_EVT_UNMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) qla24xx_sp_unmap(vha, e->u.iosb.sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) case QLA_EVT_RELOGIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) qla2x00_relogin(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) case QLA_EVT_NEW_SESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) qla24xx_create_new_sess(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) case QLA_EVT_GPDB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) qla24xx_async_gpdb(vha, e->u.fcport.fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) e->u.fcport.opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) case QLA_EVT_PRLI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) qla24xx_async_prli(vha, e->u.fcport.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) case QLA_EVT_GPSC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) qla24xx_async_gpsc(vha, e->u.fcport.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) case QLA_EVT_GNL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) qla24xx_async_gnl(vha, e->u.fcport.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) case QLA_EVT_NACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) qla24xx_do_nack_work(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) case QLA_EVT_ASYNC_PRLO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) case QLA_EVT_ASYNC_PRLO_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) qla2x00_async_prlo_done(vha, e->u.logio.fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) e->u.logio.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) case QLA_EVT_GPNFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) e->u.gpnft.sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) case QLA_EVT_GPNFT_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) case QLA_EVT_GNNFT_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) case QLA_EVT_GNNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) qla24xx_async_gnnid(vha, e->u.fcport.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) case QLA_EVT_GFPNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) case QLA_EVT_SP_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) qla_sp_retry(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) case QLA_EVT_IIDMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) qla_do_iidma_work(vha, e->u.fcport.fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) case QLA_EVT_ELS_PLOGI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) e->u.fcport.fcport, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) if (rc == EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) /* put 'work' at head of 'vha->work_list' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) spin_lock_irqsave(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) list_splice(&work, &vha->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) spin_unlock_irqrestore(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) list_del_init(&e->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) if (e->flags & QLA_EVT_FLAG_FREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) kfree(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) /* For each work completed decrement vha ref count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) QLA_VHA_MARK_NOT_BUSY(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349)
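/*
 * Posts a QLA_EVT_RELOGIN work event; if the event cannot be allocated,
 * RELOGIN_NEEDED is set in dpc_flags instead so the relogin is retried
 * later.
 */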
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) struct qla_work_evt *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) return qla2x00_post_work(vha, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363)
/*
 * Re-login all the fcports of a vport.
 * Context: dpc thread
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) void qla2x00_relogin(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) fc_port_t *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) int status, relogin_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) struct event_arg ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) list_for_each_entry(fcport, &vha->vp_fcports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) * If the port is not ONLINE then try to login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) * to it if we haven't run out of retries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) if (atomic_read(&fcport->state) != FCS_ONLINE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) fcport->login_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) if (fcport->scan_state != QLA_FCPORT_FOUND ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) fcport->disc_state == DSC_LOGIN_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) fcport->disc_state == DSC_DELETE_PEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) relogin_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) if (vha->hw->current_topology != ISP_CFG_NL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) memset(&ea, 0, sizeof(ea));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) ea.fcport = fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) qla24xx_handle_relogin_event(vha, &ea);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) } else if (vha->hw->current_topology ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) ISP_CFG_NL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) IS_QLA2XXX_MIDTYPE(vha->hw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) (void)qla24xx_fcport_handle_login(vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) } else if (vha->hw->current_topology ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) ISP_CFG_NL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) fcport->login_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) qla2x00_local_device_login(vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) if (status == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) fcport->old_loop_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) fcport->loop_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) ql_dbg(ql_dbg_disc, vha, 0x2003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) "Port login OK: logged in ID 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) fcport->loop_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) qla2x00_update_fcport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) (vha, fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) } else if (status == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) set_bit(RELOGIN_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) /* retry the login again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) ql_dbg(ql_dbg_disc, vha, 0x2007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) "Retrying %d login again loop_id 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) fcport->login_retry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) fcport->loop_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) fcport->login_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) if (fcport->login_retry == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) status != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) qla2x00_clear_loop_id(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) if (relogin_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) ql_dbg(ql_dbg_disc, vha, 0x400e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) "Relogin end.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) /* Schedule work on any of the dpc-workqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) switch (work_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) case MBA_IDC_AEN: /* 0x8200 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) if (ha->dpc_lp_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) queue_work(ha->dpc_lp_wq, &ha->idc_aen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) case QLA83XX_NIC_CORE_RESET: /* 0x1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) if (!ha->flags.nic_core_reset_hdlr_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) if (ha->dpc_hp_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
} else {
ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
    "NIC Core reset is already active. Skip scheduling it again.\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) if (ha->dpc_hp_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) if (ha->dpc_hp_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) ql_log(ql_log_warn, base_vha, 0xb05f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) "Unknown work-code=0x%x.\n", work_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) /* Work: Perform NIC Core Unrecoverable state handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) struct qla_hw_data *ha =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) container_of(work, struct qla_hw_data, nic_core_unrecoverable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) uint32_t dev_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) qla83xx_reset_ownership(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) if (ha->flags.nic_core_reset_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) ha->flags.nic_core_reset_owner = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) QLA8XXX_DEV_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) /* Work: Execute IDC state handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) qla83xx_idc_state_handler_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) struct qla_hw_data *ha =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) container_of(work, struct qla_hw_data, idc_state_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) uint32_t dev_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) if (dev_state == QLA8XXX_DEV_FAILED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) qla83xx_idc_state_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515)
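/*
 * Check whether the NIC core firmware is alive by sampling the
 * QLA83XX_FW_HEARTBEAT counter twice, 100ms apart, until it advances or
 * roughly one second elapses. Returns QLA_SUCCESS if the counter moved,
 * QLA_FUNCTION_FAILED otherwise.
 */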
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) unsigned long heart_beat_wait = jiffies + (1 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) uint32_t heart_beat_counter1, heart_beat_counter2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) if (time_after(jiffies, heart_beat_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) "Nic Core f/w is not alive.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) &heart_beat_counter1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) &heart_beat_counter2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) } while (heart_beat_counter1 == heart_beat_counter2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) /* Work: Perform NIC Core Reset handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) qla83xx_nic_core_reset_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) struct qla_hw_data *ha =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) container_of(work, struct qla_hw_data, nic_core_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) uint32_t dev_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) if (IS_QLA2031(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) ql_log(ql_log_warn, base_vha, 0xb081,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) "Failed to dump mctp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) if (!ha->flags.nic_core_reset_hdlr_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) &dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) if (dev_state != QLA8XXX_DEV_NEED_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) "Nic Core f/w is alive.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) ha->flags.nic_core_reset_hdlr_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) if (qla83xx_nic_core_reset(base_vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) /* NIC Core reset failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) "NIC Core reset failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) ha->flags.nic_core_reset_hdlr_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) /* Work: Handle 8200 IDC aens */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) qla83xx_service_idc_aen(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) struct qla_hw_data *ha =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) container_of(work, struct qla_hw_data, idc_aen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) uint32_t dev_state, idc_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) if (dev_state == QLA8XXX_DEV_NEED_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) "Application requested NIC Core Reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) "Other protocol driver requested NIC Core Reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) } else if (dev_state == QLA8XXX_DEV_FAILED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613)
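/* Back off between IDC lock retries: sleep ~100ms when allowed, else briefly spin */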
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) qla83xx_wait_logic(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) /* Yield CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) if (!in_interrupt()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) 		 * Wait about 100ms before retrying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) 		 * This controls the number of retries for a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) 		 * lock operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) for (i = 0; i < 20; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) 			cpu_relax(); /* This is a nop instruction on i386 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633)
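/*
 * Force IDC lock recovery: if no recovery is already in progress, advance the
 * lock-recovery register through STAGE1/STAGE2, and if this function wins
 * recovery ownership, forcefully release the driver lock and clear the
 * lock-id and lock-recovery registers.
 */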
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) uint32_t data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) uint32_t idc_lck_rcvry_stage_mask = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) uint32_t idc_lck_rcvry_owner_mask = 0x3c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) "Trying force recovery of the IDC lock.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) if ((data & idc_lck_rcvry_stage_mask) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) msleep(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) data &= (IDC_LOCK_RECOVERY_STAGE2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) ~(idc_lck_rcvry_stage_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) rval = qla83xx_wr_reg(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) QLA83XX_IDC_LOCK_RECOVERY, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) /* Forcefully perform IDC UnLock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) /* Clear lock-id by setting 0xff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) /* Clear lock-recovery by setting 0x0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) rval = qla83xx_wr_reg(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) QLA83XX_IDC_LOCK_RECOVERY, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695)
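/*
 * Wait for the current IDC lock owner (DRIVER_LOCKID) to change; if it does
 * not change within QLA83XX_MAX_LOCK_RECOVERY_WAIT, force lock recovery.
 */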
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) uint32_t o_drv_lockid, n_drv_lockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) unsigned long lock_recovery_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) retry_lockid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) /* MAX wait time before forcing IDC Lock recovery = 2 secs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) if (time_after_eq(jiffies, lock_recovery_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) return QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) if (o_drv_lockid == n_drv_lockid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) qla83xx_wait_logic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) goto retry_lockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) return QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730)
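/* Acquire the inter-driver (IDC) lock; on contention, attempt lock recovery and retry */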
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) uint32_t data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) uint32_t lock_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) /* IDC-lock implementation using driver-lock/lock-id remote registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) retry_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) /* Setting lock-id to our function-number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) ha->portnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) &lock_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) 			    "Failed to acquire IDC lock, acquired by %d, retrying...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) 			    lock_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) /* Retry/Perform IDC-Lock recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) if (qla83xx_idc_lock_recovery(base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) qla83xx_wait_logic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) goto retry_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) ql_log(ql_log_warn, base_vha, 0xb075,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) "IDC Lock recovery FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767)
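/*
 * Decide whether the RDP response must be shrunk to 256 bytes: only for
 * requests from the (logged-out) Domain Controller, on firmware versions
 * that cannot return the full 2048-byte response.
 */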
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) struct purex_entry_24xx *purex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) char fwstr[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) struct port_database_24xx *pdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) /* Domain Controller is always logged-out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) /* if RDP request is not from Domain Controller: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) if (sid != 0xfffc01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) if (!pdb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) ql_dbg(ql_dbg_init, vha, 0x0181,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) "%s: Failed allocate pdb\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) } else if (qla24xx_get_port_database(vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) le16_to_cpu(purex->nport_handle), pdb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) ql_dbg(ql_dbg_init, vha, 0x0181,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) "%s: Failed get pdb sid=%x\n", __func__, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) pdb->current_login_state != PDS_PRLI_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) ql_dbg(ql_dbg_init, vha, 0x0181,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) "%s: Port not logged in sid=%#x\n", __func__, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) /* RDP request is from logged in port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) kfree(pdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) kfree(pdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) fwstr[strcspn(fwstr, " ")] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) 	/* if FW version allows RDP response length up to 2048 bytes: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) /* RDP response length is to be reduced to maximum 256 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815)  * Function Name: qla24xx_process_purex_rdp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817)  * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818)  * Prepare an RDP response and send it to the Fabric switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820)  * PARAMETERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821)  * vha:  SCSI qla host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822)  * item: purex item carrying the RDP request received by the HBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) struct purex_item *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) struct purex_entry_24xx *purex =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) (struct purex_entry_24xx *)&item->iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) dma_addr_t rsp_els_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) dma_addr_t rsp_payload_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) dma_addr_t stat_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) dma_addr_t sfp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) struct els_entry_24xx *rsp_els = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) struct rdp_rsp_payload *rsp_payload = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) struct link_statistics *stat = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) uint8_t *sfp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) uint16_t sfp_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) uint rsp_payload_length = sizeof(*rsp_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) "%s: Enter\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) "-------- ELS REQ -------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) purex, sizeof(*purex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) rsp_payload_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) offsetof(typeof(*rsp_payload), optical_elmt_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) ql_dbg(ql_dbg_init, vha, 0x0181,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) "Reducing RSP payload length to %u bytes...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) rsp_payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) &rsp_els_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) if (!rsp_els) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) ql_log(ql_log_warn, vha, 0x0183,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) "Failed allocate dma buffer ELS RSP.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) goto dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) &rsp_payload_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) if (!rsp_payload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) ql_log(ql_log_warn, vha, 0x0184,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) "Failed allocate dma buffer ELS RSP payload.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) goto dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) &sfp_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) &stat_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879)
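	/*
	 * Note: sfp and stat allocation failures are tolerated; the related
	 * descriptors below are simply left zero-filled (each use is guarded
	 * by "if (sfp)" / "if (stat)").
	 */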
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) /* Prepare Response IOCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) rsp_els->entry_type = ELS_IOCB_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) rsp_els->entry_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) rsp_els->sys_define = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) rsp_els->entry_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) rsp_els->handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) rsp_els->nport_handle = purex->nport_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) rsp_els->tx_dsd_count = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) rsp_els->vp_index = purex->vp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) rsp_els->sof_type = EST_SOFI3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) rsp_els->rx_xchg_address = purex->rx_xchg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) rsp_els->rx_dsd_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) rsp_els->opcode = purex->els_frame_payload[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) rsp_els->d_id[0] = purex->s_id[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) rsp_els->d_id[1] = purex->s_id[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) rsp_els->d_id[2] = purex->s_id[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) rsp_els->rx_byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) rsp_els->tx_len = rsp_els->tx_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) rsp_els->rx_address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) rsp_els->rx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) /* Prepare Response Payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) sizeof(rsp_payload->hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) /* Link service Request Info Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) rsp_payload->ls_req_info_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) rsp_payload->ls_req_info_desc.req_payload_word_0 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) cpu_to_be32p((uint32_t *)purex->els_frame_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) /* Link service Request Info Descriptor 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) rsp_payload->ls_req_info_desc2.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) rsp_payload->ls_req_info_desc2.req_payload_word_0 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) cpu_to_be32p((uint32_t *)purex->els_frame_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) rsp_payload->sfp_diag_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) if (sfp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) /* SFP Flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) memset(sfp, 0, SFP_RTDI_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) /* SFP Flags bits 3-0: Port Tx Laser Type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) sfp_flags |= BIT_0; /* short wave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) else if (sfp[0] & BIT_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) sfp_flags |= BIT_1; /* long wave 1310nm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) else if (sfp[1] & BIT_4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) /* SFP Type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) memset(sfp, 0, SFP_RTDI_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) sfp_flags |= BIT_4; /* optical */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) if (sfp[0] == 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) sfp_flags |= BIT_6; /* sfp+ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) /* SFP Diagnostics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) memset(sfp, 0, SFP_RTDI_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) __be16 *trx = (__force __be16 *)sfp; /* already be16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) rsp_payload->sfp_diag_desc.temperature = trx[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) rsp_payload->sfp_diag_desc.vcc = trx[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) rsp_payload->sfp_diag_desc.tx_bias = trx[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) rsp_payload->sfp_diag_desc.tx_power = trx[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) rsp_payload->sfp_diag_desc.rx_power = trx[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) /* Port Speed Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) rsp_payload->port_speed_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) qla25xx_fdmi_port_speed_capability(ha));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) qla25xx_fdmi_port_speed_currently(ha));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) /* Link Error Status Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) rsp_payload->ls_err_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) if (stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) rsp_payload->ls_err_desc.link_fail_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) rsp_payload->ls_err_desc.loss_sync_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) rsp_payload->ls_err_desc.loss_sig_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) rsp_payload->ls_err_desc.prim_seq_err_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) rsp_payload->ls_err_desc.inval_xmit_word_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) rsp_payload->ls_err_desc.inval_crc_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) /* Portname Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) rsp_payload->port_name_diag_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) memcpy(rsp_payload->port_name_diag_desc.WWNN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) vha->node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) sizeof(rsp_payload->port_name_diag_desc.WWNN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) memcpy(rsp_payload->port_name_diag_desc.WWPN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) vha->port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) sizeof(rsp_payload->port_name_diag_desc.WWPN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) /* F-Port Portname Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) rsp_payload->port_name_direct_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) memcpy(rsp_payload->port_name_direct_desc.WWNN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) vha->fabric_node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) sizeof(rsp_payload->port_name_direct_desc.WWNN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) memcpy(rsp_payload->port_name_direct_desc.WWPN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) vha->fabric_port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) sizeof(rsp_payload->port_name_direct_desc.WWPN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) 	/* Buffer Credit Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) rsp_payload->buffer_credit_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) rsp_payload->buffer_credit_desc.fcport_b2b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) if (ha->flags.plogi_template_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) uint32_t tmp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) if (rsp_payload_length < sizeof(*rsp_payload))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) goto send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) /* Optical Element Descriptor, Temperature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) rsp_payload->optical_elmt_desc[0].desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) /* Optical Element Descriptor, Voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) rsp_payload->optical_elmt_desc[1].desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) /* Optical Element Descriptor, Tx Bias Current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) rsp_payload->optical_elmt_desc[2].desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) /* Optical Element Descriptor, Tx Power */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) rsp_payload->optical_elmt_desc[3].desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) /* Optical Element Descriptor, Rx Power */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) rsp_payload->optical_elmt_desc[4].desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) if (sfp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) memset(sfp, 0, SFP_RTDI_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) __be16 *trx = (__force __be16 *)sfp; /* already be16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) /* Optical Element Descriptor, Temperature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) rsp_payload->optical_elmt_desc[0].element_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) cpu_to_be32(1 << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) /* Optical Element Descriptor, Voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) rsp_payload->optical_elmt_desc[1].element_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) cpu_to_be32(2 << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) /* Optical Element Descriptor, Tx Bias Current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) rsp_payload->optical_elmt_desc[2].element_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) cpu_to_be32(3 << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) /* Optical Element Descriptor, Tx Power */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) rsp_payload->optical_elmt_desc[3].element_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) cpu_to_be32(4 << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) /* Optical Element Descriptor, Rx Power */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) rsp_payload->optical_elmt_desc[4].element_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) cpu_to_be32(5 << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) memset(sfp, 0, SFP_RTDI_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) /* Temperature high/low alarm/warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) rsp_payload->optical_elmt_desc[0].element_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) (sfp[0] >> 7 & 1) << 3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) (sfp[0] >> 6 & 1) << 2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) (sfp[4] >> 7 & 1) << 1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) (sfp[4] >> 6 & 1) << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) /* Voltage high/low alarm/warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) rsp_payload->optical_elmt_desc[1].element_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) (sfp[0] >> 5 & 1) << 3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) (sfp[0] >> 4 & 1) << 2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) (sfp[4] >> 5 & 1) << 1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) (sfp[4] >> 4 & 1) << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) /* Tx Bias Current high/low alarm/warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) rsp_payload->optical_elmt_desc[2].element_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) (sfp[0] >> 3 & 1) << 3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) (sfp[0] >> 2 & 1) << 2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) (sfp[4] >> 3 & 1) << 1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) (sfp[4] >> 2 & 1) << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) /* Tx Power high/low alarm/warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) rsp_payload->optical_elmt_desc[3].element_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) (sfp[0] >> 1 & 1) << 3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) (sfp[0] >> 0 & 1) << 2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) (sfp[4] >> 1 & 1) << 1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) (sfp[4] >> 0 & 1) << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) /* Rx Power high/low alarm/warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) rsp_payload->optical_elmt_desc[4].element_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) (sfp[1] >> 7 & 1) << 3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) (sfp[1] >> 6 & 1) << 2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) (sfp[5] >> 7 & 1) << 1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) (sfp[5] >> 6 & 1) << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) /* Optical Product Data Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) rsp_payload->optical_prod_desc.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) if (sfp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) memset(sfp, 0, SFP_RTDI_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) memcpy(rsp_payload->optical_prod_desc.vendor_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) sfp + 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) sizeof(rsp_payload->optical_prod_desc.vendor_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) memcpy(rsp_payload->optical_prod_desc.part_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) sfp + 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) sizeof(rsp_payload->optical_prod_desc.part_number));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) memcpy(rsp_payload->optical_prod_desc.revision,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) sfp + 36,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) sizeof(rsp_payload->optical_prod_desc.revision));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) memcpy(rsp_payload->optical_prod_desc.serial_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) sfp + 48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) sizeof(rsp_payload->optical_prod_desc.serial_number));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) memset(sfp, 0, SFP_RTDI_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) if (!rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) memcpy(rsp_payload->optical_prod_desc.date,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) sfp + 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) sizeof(rsp_payload->optical_prod_desc.date));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) send:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) ql_dbg(ql_dbg_init, vha, 0x0183,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) "Sending ELS Response to RDP Request...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) "-------- ELS RSP -------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) rsp_els, sizeof(*rsp_els));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) "-------- ELS RSP PAYLOAD -------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) rsp_payload, rsp_payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) ql_log(ql_log_warn, vha, 0x0188,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) "%s: iocb failed to execute -> %x\n", __func__, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) } else if (rsp_els->comp_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) ql_log(ql_log_warn, vha, 0x0189,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) __func__, rsp_els->comp_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) rsp_els->error_subcode_1, rsp_els->error_subcode_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) dealloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) if (stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) stat, stat_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) if (sfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) sfp, sfp_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) if (rsp_payload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) rsp_payload, rsp_payload_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) if (rsp_els)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) rsp_els, rsp_els_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227)
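/*
 * Release a purex_item: the per-vha default_item is embedded in the vha and
 * is simply cleared for reuse, while dynamically allocated items are freed.
 */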
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) qla24xx_free_purex_item(struct purex_item *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) if (item == &item->vha->default_item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) memset(&item->vha->default_item, 0, sizeof(struct purex_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) kfree(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236)
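/*
 * Drain the PUREX work list: splice the pending items onto a local list under
 * the list lock, then invoke each item's handler and release the item.
 */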
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) void qla24xx_process_purex_list(struct purex_list *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) struct list_head head = LIST_HEAD_INIT(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) struct purex_item *item, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) ulong flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) spin_lock_irqsave(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) list_splice_init(&list->head, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) spin_unlock_irqrestore(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) list_for_each_entry_safe(item, next, &head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) list_del(&item->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) item->process_item(item->vha, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) qla24xx_free_purex_item(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) uint16_t options = (requester_id << 15) | BIT_7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) uint16_t retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) uint32_t data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) /* IDC-unlock implementation using driver-unlock/lock-id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) * remote registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) retry_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) if (data == ha->portnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) /* Clearing lock-id by setting 0xff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) } else if (retry < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) /* SV: XXX: IDC unlock retrying needed here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) /* Retry for IDC-unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) qla83xx_wait_logic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) retry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) "Failed to release IDC lock, retrying=%d\n", retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) goto retry_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) } else if (retry < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) /* Retry for IDC-unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) qla83xx_wait_logic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) retry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) "Failed to read drv-lockid, retrying=%d\n", retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) goto retry_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) /* XXX: IDC-unlock implementation using access-control mbx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) retry_unlock2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) if (retry < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) /* Retry for IDC-unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) qla83xx_wait_logic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) retry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) "Failed to release IDC lock, retrying=%d\n", retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) goto retry_unlock2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314)
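/*
 * Set this function's bit in the IDC DRV-PRESENCE register.
 * The caller is expected to hold the IDC lock.
 */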
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) __qla83xx_set_drv_presence(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) uint32_t drv_presence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) if (rval == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) drv_presence |= (1 << ha->portnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) drv_presence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) qla83xx_set_drv_presence(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) qla83xx_idc_lock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) rval = __qla83xx_set_drv_presence(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) qla83xx_idc_unlock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343)
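/*
 * Clear this function's bit in the IDC DRV-PRESENCE register.
 * The caller is expected to hold the IDC lock.
 */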
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) __qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) uint32_t drv_presence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) if (rval == QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) drv_presence &= ~(1 << ha->portnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) drv_presence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) qla83xx_idc_lock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) rval = __qla83xx_clear_drv_presence(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) qla83xx_idc_unlock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372)
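/*
 * Reset-owner path for NEED_RESET: wait up to fcoe_reset_timeout for every
 * present function to ACK, drop any that did not ACK from DRV-PRESENCE, then
 * move the device state to COLD.
 */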
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) qla83xx_need_reset_handler(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) uint32_t drv_ack, drv_presence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) unsigned long ack_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) if ((drv_ack & drv_presence) == drv_presence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) if (time_after_eq(jiffies, ack_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) ql_log(ql_log_warn, vha, 0xb067,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) "RESET ACK TIMEOUT! drv_presence=0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) "drv_ack=0x%x\n", drv_presence, drv_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) * The function(s) which did not ack in time are forced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) * to withdraw any further participation in the IDC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) * reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) if (drv_ack != drv_presence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) drv_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) qla83xx_idc_unlock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) qla83xx_idc_lock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411)
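/*
 * Reset-owner bootstrap: mark the device INITIALIZING, restart the NIC
 * firmware (the IDC lock is dropped around the mailbox call), then set the
 * device state to READY on success or FAILED on error.
 */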
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) qla83xx_device_bootstrap(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) uint32_t idc_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) __qla83xx_get_idc_control(vha, &idc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) __qla83xx_set_idc_control(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) qla83xx_idc_unlock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) rval = qla83xx_restart_nic_firmware(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) qla83xx_idc_lock(vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) ql_log(ql_log_fatal, vha, 0xb06a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) "Failed to restart NIC f/w.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) ql_dbg(ql_dbg_p3p, vha, 0xb06c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) "Success in restarting nic f/w.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) /* Assumes idc_lock always held on entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) int rval = QLA_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) unsigned long dev_init_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) uint32_t dev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) if (time_after_eq(jiffies, dev_init_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) ql_log(ql_log_warn, base_vha, 0xb06e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) "Initialization TIMEOUT!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) /* Init timeout. Disable further NIC Core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) * communication.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) QLA8XXX_DEV_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) ql_log(ql_log_info, base_vha, 0xb06f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) "HW State: FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471) qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) switch (dev_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) case QLA8XXX_DEV_READY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) if (ha->flags.nic_core_reset_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) qla83xx_idc_audit(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) IDC_AUDIT_COMPLETION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) ha->flags.nic_core_reset_owner = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) "Reset_owner reset by 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) ha->portnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) case QLA8XXX_DEV_COLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) if (ha->flags.nic_core_reset_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) rval = qla83xx_device_bootstrap(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) /* Wait for AEN to change device-state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) case QLA8XXX_DEV_INITIALIZING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) /* Wait for AEN to change device-state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) case QLA8XXX_DEV_NEED_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) qla83xx_need_reset_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) /* Wait for AEN to change device-state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) /* Reset the init timeout after the need-reset handler has run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) dev_init_timeout = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) (ha->fcoe_dev_init_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) case QLA8XXX_DEV_NEED_QUIESCENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) /* XXX: DEBUG for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) case QLA8XXX_DEV_QUIESCENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) /* XXX: DEBUG for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) if (ha->flags.quiesce_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) dev_init_timeout = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) (ha->fcoe_dev_init_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) case QLA8XXX_DEV_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) if (ha->flags.nic_core_reset_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) qla83xx_idc_audit(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) IDC_AUDIT_COMPLETION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) ha->flags.nic_core_reset_owner = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) __qla83xx_clear_drv_presence(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) qla8xxx_dev_failed_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) case QLA8XXX_BAD_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) ql_log(ql_log_warn, base_vha, 0xb071,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) "Unknown Device State: %x.\n", dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) qla83xx_idc_unlock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) qla8xxx_dev_failed_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) rval = QLA_FUNCTION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) qla83xx_idc_lock(base_vha, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558)
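/*
 * board_disable work handler: unwind the driver state and disable the PCI
 * device after an unrecoverable PCI error. Final qla_hw_data cleanup is left
 * to qla2x00_remove_one().
 */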
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) qla2x00_disable_board_on_pci_error(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) board_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) struct pci_dev *pdev = ha->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) ql_log(ql_log_warn, base_vha, 0x015b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) "Disabling adapter.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) if (!atomic_read(&pdev->enable_cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) ql_log(ql_log_info, base_vha, 0xfffc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) "PCI device disabled, no action req for PCI error=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) base_vha->pci_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) * if UNLOADING flag is already set, then continue unload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) * where it was set first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) qla2x00_wait_for_sess_deletion(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) qla2x00_delete_all_vps(ha, base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) qla2x00_dfs_remove(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) qla84xx_put_chip(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) if (base_vha->timer_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) qla2x00_stop_timer(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) base_vha->flags.online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) qla2x00_destroy_deferred_work(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) * Do not try to stop beacon blink as it will issue a mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) * command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) qla2x00_free_sysfs_attr(base_vha, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) fc_remove_host(base_vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) scsi_remove_host(base_vha->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) base_vha->flags.init_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) qla25xx_delete_queues(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) qla2x00_free_fcports(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) qla2x00_free_irqs(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) qla2x00_mem_free(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) qla82xx_md_free(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) qla2x00_free_queues(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) qla2x00_unmap_iobases(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) pci_release_selected_regions(ha->pdev, ha->bars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) pci_disable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) * qla2x00_do_dpc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) * This kernel thread is a task that is scheduled by the interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) * to perform the background processing for interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) * Notes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) * This task always runs in the context of a kernel thread. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) * is kicked off by the driver's detect code and starts up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) * one instance per adapter. It immediately goes to sleep and waits for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) * some fibre event. When either the interrupt handler or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) * the timer routine detects an event, it sets one of the task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) * bits and then wakes us up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) qla2x00_do_dpc(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) scsi_qla_host_t *base_vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) struct qla_hw_data *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) uint32_t online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) struct qla_qpair *qpair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) ha = (struct qla_hw_data *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) set_user_nice(current, MIN_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) "DPC handler sleeping.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) if (!base_vha->flags.init_done || ha->flags.mbox_busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) goto end_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) if (ha->flags.eeh_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) "eeh_busy=%d.\n", ha->flags.eeh_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) goto end_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) ha->dpc_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) "DPC handler waking up, dpc_flags=0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) if (test_bit(UNLOADING, &base_vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) if (IS_P3P_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) if (IS_QLA8044(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) if (test_and_clear_bit(ISP_UNRECOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) qla8044_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) qla8044_wr_direct(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) QLA8044_CRB_DEV_STATE_INDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) QLA8XXX_DEV_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) qla8044_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) ql_log(ql_log_info, base_vha, 0x4004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) "HW State: FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) qla8044_device_state_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) if (test_and_clear_bit(ISP_UNRECOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) qla82xx_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) QLA8XXX_DEV_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) qla82xx_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) ql_log(ql_log_info, base_vha, 0x0151,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) "HW State: FAILED.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) qla82xx_device_state_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) "FCoE context reset scheduled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) &base_vha->dpc_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) if (qla82xx_fcoe_ctx_reset(base_vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) /* FCoE-ctx reset failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) * Escalate to chip-reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) set_bit(ISP_ABORT_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) clear_bit(ABORT_ISP_ACTIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) "FCoE context reset end.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) } else if (IS_QLAFX00(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) if (test_and_clear_bit(ISP_UNRECOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) "Firmware Reset Recovery\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) if (qlafx00_reset_initialize(base_vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) /* Failed. Abort isp later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) if (!test_bit(UNLOADING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) set_bit(ISP_UNRECOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) ql_dbg(ql_dbg_dpc, base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) 0x4021,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) "Reset Recovery Failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) if (test_and_clear_bit(FX00_TARGET_SCAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) "ISPFx00 Target Scan scheduled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) if (qlafx00_rescan_isp(base_vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) if (!test_bit(UNLOADING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) &base_vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) set_bit(ISP_UNRECOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) "ISPFx00 Target Scan Failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) "ISPFx00 Target Scan End\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) "ISPFx00 Host Info resend scheduled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) qlafx00_fx_disc(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) &base_vha->hw->mr.fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) FXDISC_REG_HOST_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) if (test_and_clear_bit(DETECT_SFP_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) /* Semantic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) * - NO-OP -- await next ISP-ABORT. Preferred method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) * to minimize disruptions that will occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) * when a forced chip-reset occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) * - Force -- ISP-ABORT scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) if (test_and_clear_bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) !test_bit(UNLOADING, &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) bool do_reset = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) switch (base_vha->qlini_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) case QLA2XXX_INI_MODE_ENABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) case QLA2XXX_INI_MODE_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) if (!qla_tgt_mode_enabled(base_vha) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) !ha->flags.fw_started)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) do_reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) case QLA2XXX_INI_MODE_DUAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) if (!qla_dual_mode_enabled(base_vha) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) !ha->flags.fw_started)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) do_reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) &base_vha->dpc_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) base_vha->flags.online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) "ISP abort scheduled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) if (ha->isp_ops->abort_isp(base_vha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) /* failed. retry later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) set_bit(ISP_ABORT_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) clear_bit(ABORT_ISP_ACTIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) "ISP abort end.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) qla24xx_process_purex_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) (&base_vha->purex_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) clear_bit(PROCESS_PUREX_IOCB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) qla2x00_update_fcports(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) if (IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) goto loop_resync_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) "Quiescence mode scheduled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) if (IS_P3P_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) if (IS_QLA82XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) qla82xx_device_state_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) if (IS_QLA8044(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) qla8044_device_state_handler(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) clear_bit(ISP_QUIESCE_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) if (!ha->flags.quiesce_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) qla2x00_perform_loop_resync(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) if (IS_QLA82XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) qla82xx_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) qla82xx_clear_qsnt_ready(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) qla82xx_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) } else if (IS_QLA8044(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) qla8044_idc_lock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) qla8044_clear_qsnt_ready(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) qla8044_idc_unlock(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) clear_bit(ISP_QUIESCE_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) qla2x00_quiesce_io(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) "Quiescence mode end.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) if (test_and_clear_bit(RESET_MARKER_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) &base_vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) "Reset marker scheduled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) qla2x00_rst_aen(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) "Reset marker end.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) /* Retry each device up to login retry count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) if (!base_vha->relogin_jif ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) time_after_eq(jiffies, base_vha->relogin_jif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) base_vha->relogin_jif = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) ql_dbg(ql_dbg_disc, base_vha, 0x400d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) "Relogin scheduled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) qla24xx_post_relogin_work(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) loop_resync_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) "Loop resync scheduled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) &base_vha->dpc_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) qla2x00_loop_resync(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) clear_bit(LOOP_RESYNC_ACTIVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) "Loop resync end.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) if (IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) goto intr_on_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) atomic_read(&base_vha->loop_state) == LOOP_READY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) qla2xxx_flash_npiv_conf(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) intr_on_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) if (!ha->interrupts_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) ha->isp_ops->enable_intrs(ha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) if (test_and_clear_bit(BEACON_BLINK_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) if (ha->beacon_blink_led == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) ha->isp_ops->beacon_blink(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) /* qpair online check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) if (ha->flags.eeh_busy ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) ha->flags.pci_channel_io_perm_failure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) mutex_lock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) list_for_each_entry(qpair, &base_vha->qp_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) qp_list_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) qpair->online = online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) ql_log(ql_log_info, base_vha, 0xffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) "nvme: SET ZIO Activity exchange threshold to %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) ha->nvme_last_rptd_aen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) if (qla27xx_set_zio_threshold(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) ha->nvme_last_rptd_aen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) ql_log(ql_log_info, base_vha, 0xffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) ha->nvme_last_rptd_aen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) ql_log(ql_log_info, base_vha, 0xffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) "SET ZIO Activity exchange threshold to %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) ha->last_zio_threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) qla27xx_set_zio_threshold(base_vha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) ha->last_zio_threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) if (!IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) qla2x00_do_dpc_all_vps(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) if (test_and_clear_bit(N2N_LINK_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) &base_vha->dpc_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) qla2x00_lip_reset(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) ha->dpc_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) end_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) } /* End of while (!kthread_should_stop()) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) "DPC handler exiting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) * Make sure that nobody tries to wake us up again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) ha->dpc_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) /* Cleanup any residual CTX SRBs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005)
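/* Wake the DPC kthread unless the host is being unloaded. */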
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) qla2xxx_wake_dpc(struct scsi_qla_host *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) struct task_struct *t = ha->dpc_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) wake_up_process(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) * qla2x00_rst_aen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) * Processes asynchronous reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) * vha = adapter block pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) qla2x00_rst_aen(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) if (vha->flags.online && !vha->flags.reset_active &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) !atomic_read(&vha->loop_down_timer) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) * Issue marker command only when we are going to start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) * the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) vha->marker_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) } while (!atomic_read(&vha->loop_down_timer) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) * qla2x00_timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) * One second timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) * Context: Interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) ***************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) qla2x00_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) scsi_qla_host_t *vha = from_timer(vha, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) unsigned long cpu_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055) int start_dpc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) uint16_t w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) struct req_que *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) if (ha->flags.eeh_busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) ql_dbg(ql_dbg_timer, vha, 0x6000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) "EEH = %d, restarting timer.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) ha->flags.eeh_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) qla2x00_restart_timer(vha, WATCH_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) * Hardware read to raise pending EEH errors during mailbox waits. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) * the read returns -1 then disable the board.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) if (!pci_channel_offline(ha->pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) qla2x00_check_reg16_for_disconnect(vha, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) /* Make sure qla82xx_watchdog is run only for physical port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) start_dpc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) if (IS_QLA82XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) qla82xx_watchdog(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) else if (IS_QLA8044(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) qla8044_watchdog(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) if (!vha->vp_idx && IS_QLAFX00(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) qlafx00_timer_routine(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) /* Loop down handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) if (atomic_read(&vha->loop_down_timer) > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) && vha->flags.online) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) if (atomic_read(&vha->loop_down_timer) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) vha->loop_down_abort_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) ql_log(ql_log_info, vha, 0x6008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) "Loop down - aborting the queues before time expires.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) if (!IS_QLA2100(ha) && vha->link_down_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) atomic_set(&vha->loop_state, LOOP_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) * Schedule an ISP abort to return any FCP2-device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) * commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) /* NPIV - scan physical port only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) if (!vha->vp_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) spin_lock_irqsave(&ha->hardware_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) cpu_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) req = ha->req_q_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) for (index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) index < req->num_outstanding_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) fc_port_t *sfcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) sp = req->outstanding_cmds[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) if (!sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) if (sp->cmd_type != TYPE_SRB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) if (sp->type != SRB_SCSI_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) sfcp = sp->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) if (!(sfcp->flags & FCF_FCP2_DEVICE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) if (IS_QLA82XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) set_bit(FCOE_CTX_RESET_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) set_bit(ISP_ABORT_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) spin_unlock_irqrestore(&ha->hardware_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) cpu_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) start_dpc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) /* if the loop has been down for 4 minutes, reinit adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) if (!(vha->device_flags & DFLG_NO_CABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) ql_log(ql_log_warn, vha, 0x6009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) "Loop down - aborting ISP.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) if (IS_QLA82XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) set_bit(FCOE_CTX_RESET_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) set_bit(ISP_ABORT_NEEDED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) ql_dbg(ql_dbg_timer, vha, 0x600a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) "Loop down - seconds remaining %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) atomic_read(&vha->loop_down_timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) /* Check if beacon LED needs to be blinked for physical host only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) /* There is no beacon_blink function for P3P (ISP82xx-type) adapters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) if (!IS_P3P_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) start_dpc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) /* Process any deferred work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) if (!list_empty(&vha->work_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) bool q = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) spin_lock_irqsave(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) q = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) spin_unlock_irqrestore(&vha->work_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) if (q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) queue_work(vha->hw->wq, &vha->iocb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) * FC-NVME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) * see if the active AEN count has changed from what was last reported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) index = atomic_read(&ha->nvme_active_aen_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) if (!vha->vp_idx &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) (index != ha->nvme_last_rptd_aen) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) (index >= DEFAULT_ZIO_THRESHOLD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) ha->zio_mode == QLA_ZIO_MODE_6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) !ha->flags.host_shutting_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) ql_log(ql_log_info, vha, 0x3002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) "nvme: Sched: Set ZIO exchange threshold to %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) ha->nvme_last_rptd_aen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) start_dpc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) if (!vha->vp_idx &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) IS_ZIO_THRESHOLD_CAPABLE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) ql_log(ql_log_info, vha, 0x3002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) "Sched: Set ZIO exchange threshold to %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) ha->last_zio_threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) start_dpc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) /* Schedule the DPC routine if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) start_dpc ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) ql_dbg(ql_dbg_timer, vha, 0x600b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) "isp_abort_needed=%d loop_resync_needed=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) "fcport_update_needed=%d start_dpc=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) "reset_marker_needed=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) start_dpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) ql_dbg(ql_dbg_timer, vha, 0x600c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) "beacon_blink_needed=%d isp_unrecoverable=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) "relogin_needed=%d, Process_purex_iocb=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) qla2x00_restart_timer(vha, WATCH_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) /* Firmware interface routines. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253)
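^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) /* Indexes into qla_fw_blobs[]; keep them in step with the table below. */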
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) #define FW_ISP21XX 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) #define FW_ISP22XX 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) #define FW_ISP2300 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) #define FW_ISP2322 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) #define FW_ISP24XX 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) #define FW_ISP25XX 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) #define FW_ISP81XX 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) #define FW_ISP82XX 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) #define FW_ISP2031 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) #define FW_ISP8031 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) #define FW_ISP27XX 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) #define FW_ISP28XX 11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) #define FW_FILE_ISP21XX "ql2100_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) #define FW_FILE_ISP22XX "ql2200_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) #define FW_FILE_ISP2300 "ql2300_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) #define FW_FILE_ISP2322 "ql2322_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) #define FW_FILE_ISP24XX "ql2400_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) #define FW_FILE_ISP25XX "ql2500_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) #define FW_FILE_ISP81XX "ql8100_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) #define FW_FILE_ISP82XX "ql8200_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) #define FW_FILE_ISP2031 "ql2600_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) #define FW_FILE_ISP8031 "ql8300_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) #define FW_FILE_ISP27XX "ql2700_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) #define FW_FILE_ISP28XX "ql2800_fw.bin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) static DEFINE_MUTEX(qla_fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) static struct fw_blob qla_fw_blobs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) { .name = FW_FILE_ISP24XX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) { .name = FW_FILE_ISP25XX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) { .name = FW_FILE_ISP81XX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) { .name = FW_FILE_ISP82XX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) { .name = FW_FILE_ISP2031, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) { .name = FW_FILE_ISP8031, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) { .name = FW_FILE_ISP27XX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) { .name = FW_FILE_ISP28XX, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) { .name = NULL, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) struct fw_blob *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) qla2x00_request_firmware(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) struct fw_blob *blob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) if (IS_QLA2100(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) blob = &qla_fw_blobs[FW_ISP21XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) } else if (IS_QLA2200(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) blob = &qla_fw_blobs[FW_ISP22XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) blob = &qla_fw_blobs[FW_ISP2300];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) blob = &qla_fw_blobs[FW_ISP2322];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) } else if (IS_QLA24XX_TYPE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) blob = &qla_fw_blobs[FW_ISP24XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) } else if (IS_QLA25XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) blob = &qla_fw_blobs[FW_ISP25XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) } else if (IS_QLA81XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) blob = &qla_fw_blobs[FW_ISP81XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) } else if (IS_QLA82XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) blob = &qla_fw_blobs[FW_ISP82XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) } else if (IS_QLA2031(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) blob = &qla_fw_blobs[FW_ISP2031];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) } else if (IS_QLA8031(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) blob = &qla_fw_blobs[FW_ISP8031];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) } else if (IS_QLA27XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) blob = &qla_fw_blobs[FW_ISP27XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) } else if (IS_QLA28XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) blob = &qla_fw_blobs[FW_ISP28XX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) if (!blob->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) mutex_lock(&qla_fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) if (blob->fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) ql_log(ql_log_warn, vha, 0x0063,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) "Failed to load firmware image (%s).\n", blob->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) blob->fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) blob = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) mutex_unlock(&qla_fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) return blob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) qla2x00_release_firmware(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) struct fw_blob *blob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) mutex_lock(&qla_fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) for (blob = qla_fw_blobs; blob->name; blob++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) release_firmware(blob->fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) mutex_unlock(&qla_fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362)
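^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) * Quiesce the adapter after a fatal PCI channel error: bump the chip-reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) * generation, drain pending mailbox commands, take the queue pairs offline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) * and mark all fcports on the base port and vports as lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) */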
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) struct qla_qpair *qpair = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) struct scsi_qla_host *vp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) fc_port_t *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) ha->chip_reset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) ha->base_qpair->chip_reset = ha->chip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) for (i = 0; i < ha->max_qpairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) if (ha->queue_pair_map[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) ha->queue_pair_map[i]->chip_reset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) ha->base_qpair->chip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) /* purge MBox commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) if (atomic_read(&ha->num_pend_mbx_stage3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) complete(&ha->mbx_intr_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389)
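^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) /* Give pending mailbox commands up to ~1 second (50 x 20ms) to drain. */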
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) while (atomic_read(&ha->num_pend_mbx_stage3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) atomic_read(&ha->num_pend_mbx_stage2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) atomic_read(&ha->num_pend_mbx_stage1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) if (i > 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) ha->flags.purge_mbox = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) mutex_lock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) qpair->online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) qla2x00_mark_all_devices_lost(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) spin_lock_irqsave(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) list_for_each_entry(vp, &ha->vp_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) atomic_inc(&vp->vref_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) spin_unlock_irqrestore(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) qla2x00_mark_all_devices_lost(vp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) spin_lock_irqsave(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) atomic_dec(&vp->vref_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) spin_unlock_irqrestore(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) /* Clear all async request states across all VPs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) list_for_each_entry(fcport, &vha->vp_fcports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) spin_lock_irqsave(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) list_for_each_entry(vp, &ha->vp_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) atomic_inc(&vp->vref_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) spin_unlock_irqrestore(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) list_for_each_entry(fcport, &vp->vp_fcports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) spin_lock_irqsave(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) atomic_dec(&vp->vref_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) spin_unlock_irqrestore(&ha->vport_slock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434)
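^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) /* PCI AER error_detected callback: report how recovery should proceed. */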
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) static pci_ers_result_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) scsi_qla_host_t *vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) ql_dbg(ql_dbg_aer, vha, 0x9000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) "PCI error detected, state %x.\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) if (!atomic_read(&pdev->enable_cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) ql_log(ql_log_info, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) "PCI device is disabled, state %x.\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) case pci_channel_io_normal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) ha->flags.eeh_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) if (ql2xmqsupport || ql2xnvmeenable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) return PCI_ERS_RESULT_CAN_RECOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) case pci_channel_io_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) ha->flags.eeh_busy = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) qla_pci_error_cleanup(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) case pci_channel_io_perm_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) ha->flags.pci_channel_io_perm_failure = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) if (ql2xmqsupport || ql2xnvmeenable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) qla2xxx_wake_dpc(vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) return PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473)
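^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) /* PCI AER mmio_enabled callback: if the RISC is paused, dump firmware state and request a slot reset. */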
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) static pci_ers_result_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) int risc_paused = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) uint32_t stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) if (IS_QLA82XX(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) return PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) spin_lock_irqsave(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) stat = rd_reg_word(&reg->hccr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) if (stat & HCCR_RISC_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) risc_paused = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) } else if (IS_QLA23XX(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) stat = rd_reg_dword(&reg->u.isp2300.host_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) if (stat & HSR_RISC_PAUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) risc_paused = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) } else if (IS_FWI2_CAPABLE(ha)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) stat = rd_reg_dword(&reg24->host_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) if (stat & HSRX_RISC_PAUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) risc_paused = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) spin_unlock_irqrestore(&ha->hardware_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) if (risc_paused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) ql_log(ql_log_info, base_vha, 0x9003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) "RISC paused -- mmio_enabled, Dumping firmware.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) qla2xxx_dump_fw(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) return PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513)
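^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) /* PCI AER slot_reset callback: re-enable the device, bring the queue pairs back online and reinitialize the ISP. */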
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) static pci_ers_result_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) qla2xxx_pci_slot_reset(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) struct qla_qpair *qpair = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) ql_dbg(ql_dbg_aer, base_vha, 0x9004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) "Slot Reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) /* Workaround: the qla2xxx driver, which accesses hardware early in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) * recovery path, needs the error state to be pci_channel_io_normal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) * Otherwise mailbox commands time out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) pdev->error_state = pci_channel_io_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) /* pci_restore_state() clears the device's saved_state flag, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) * save the state again to keep a valid saved state for later restores.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) if (ha->mem_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) rc = pci_enable_device_mem(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) ql_log(ql_log_warn, base_vha, 0x9005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) "Can't re-enable PCI device after reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) goto exit_slot_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) if (ha->isp_ops->pci_config(base_vha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) goto exit_slot_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) mutex_lock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) qpair->online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) base_vha->flags.online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) ret = PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) exit_slot_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) ql_dbg(ql_dbg_aer, base_vha, 0x900e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) "slot_reset return %x.\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572)
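^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) /* PCI AER resume callback: clear the EEH-busy flag and wait for the HBA to come back online. */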
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) qla2xxx_pci_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) ql_dbg(ql_dbg_aer, base_vha, 0x900f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) "pci_resume.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) ha->flags.eeh_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) ret = qla2x00_wait_for_hba_online(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) if (ret != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) ql_log(ql_log_fatal, base_vha, 0x9002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) "The device failed to resume I/O from slot/link_reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593) qla_pci_reset_prepare(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) struct qla_qpair *qpair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599) ql_log(ql_log_warn, base_vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) "%s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) * A PCI FLR/function reset is about to reset the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) * slot. Stop the chip to halt all DMA access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) * It is assumed that pci_reset_done() will be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) * after the FLR to resume chip operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) ha->flags.eeh_busy = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) mutex_lock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) qpair->online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614) set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) qla2x00_abort_isp_cleanup(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) qla_pci_reset_done(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) struct qla_hw_data *ha = base_vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) struct qla_qpair *qpair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) ql_log(ql_log_warn, base_vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627) "%s.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) * The FLR has just completed in the PCI layer. Resume the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) ha->flags.eeh_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633) mutex_lock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) qpair->online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) base_vha->flags.online = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) ha->isp_ops->abort_isp(base_vha);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642)
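^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) * Map block-mq hardware contexts to the HBA's MSI-X vectors; fall back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) * the default CPU mapping when IRQs are user-controlled or the adapter has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) * no multiqueue registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) */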
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) static int qla2xxx_map_queues(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646) scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649) if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650) rc = blk_mq_map_queues(qmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) struct scsi_host_template qla2xxx_driver_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658) .name = QLA2XXX_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) .queuecommand = qla2xxx_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661) .eh_timed_out = fc_eh_timed_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) .eh_abort_handler = qla2xxx_eh_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) .eh_device_reset_handler = qla2xxx_eh_device_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) .eh_target_reset_handler = qla2xxx_eh_target_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666) .eh_host_reset_handler = qla2xxx_eh_host_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668) .slave_configure = qla2xxx_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670) .slave_alloc = qla2xxx_slave_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671) .slave_destroy = qla2xxx_slave_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) .scan_finished = qla2xxx_scan_finished,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) .scan_start = qla2xxx_scan_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674) .change_queue_depth = scsi_change_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675) .map_queues = qla2xxx_map_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) .this_id = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) .cmd_per_lun = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) .sg_tablesize = SG_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) .max_sectors = 0xFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) .shost_attrs = qla2x00_host_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) .supported_mode = MODE_INITIATOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) .track_queue_depth = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) .cmd_size = sizeof(srb_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) static const struct pci_error_handlers qla2xxx_err_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) .error_detected = qla2xxx_pci_error_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) .mmio_enabled = qla2xxx_pci_mmio_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691) .slot_reset = qla2xxx_pci_slot_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) .resume = qla2xxx_pci_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) .reset_prepare = qla_pci_reset_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) .reset_done = qla_pci_reset_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) static struct pci_device_id qla2xxx_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724) { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7725) { 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727) MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) static struct pci_driver qla2xxx_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730) .name = QLA2XXX_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) .id_table = qla2xxx_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) .probe = qla2x00_probe_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736) .remove = qla2x00_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737) .shutdown = qla2x00_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738) .err_handler = &qla2xxx_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741) static const struct file_operations apidev_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743) .llseek = noop_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747) * qla2x00_module_init - Module initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750) qla2x00_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753)
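^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) /* Compile-time checks: firmware interface structures must keep their expected sizes. */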
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754) BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756) BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758) BUILD_BUG_ON(sizeof(init_cb_t) != 96);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761) BUILD_BUG_ON(sizeof(request_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762) BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764) BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765) BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767) BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768) BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770) BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771) BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772) BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775) BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776) BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777) BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7778) BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7779) BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7780) BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7781) BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783) BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784) BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786) BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787) BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790) BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791) BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793) BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795) BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796) BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797) BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799) BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802) BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803) BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806) BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7808) BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810) BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812) BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813) BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815) BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816) BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818) BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819) BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820) BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821) BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822) BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824) BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826) BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827) BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830) BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833) BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835) BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) BUILD_BUG_ON(sizeof(sw_info_t) != 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837) BUILD_BUG_ON(sizeof(target_id_t) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839) /* Allocate cache for SRBs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) if (srb_cachep == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) ql_log(ql_log_fatal, NULL, 0x0001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) "Unable to allocate SRB cache...Failing load!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) /* Initialize target kmem_cache and mem_pools */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849) ret = qlt_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852) } else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854) * If initiator mode is explicitly disabled by qlt_init(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855) * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) * performing scsi_scan_target() during a LOOP UP event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7858) qla2xxx_transport_functions.disable_target_scan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7859) qla2xxx_transport_vport_functions.disable_target_scan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862) /* Derive version string. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863) strcpy(qla2x00_version_str, QLA2XXX_VERSION);
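/*
 * A value of 1 for ql2xextended_error_logging is shorthand for the
 * default debug mask (QL_DBG_DEFAULT1_MASK); any other non-zero value
 * is used as the debug mask directly.
 */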
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864) if (ql2xextended_error_logging)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865) strcat(qla2x00_version_str, "-debug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) if (ql2xextended_error_logging == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868)
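/* Dual (initiator + target) mode also exposes the target-mode attributes. */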
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869) if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870) qla_insert_tgt_attrs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872) qla2xxx_transport_template =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873) fc_attach_transport(&qla2xxx_transport_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874) if (!qla2xxx_transport_template) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875) ql_log(ql_log_fatal, NULL, 0x0002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) "fc_attach_transport failed...Failing load!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7878) goto qlt_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7880)
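/*
 * Failing to register the management char device is not fatal: loading
 * continues and apidev_major stays negative, which the unwind and
 * module-exit paths check before calling unregister_chrdev().
 */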
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7881) apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7882) if (apidev_major < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) ql_log(ql_log_fatal, NULL, 0x0003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884) "Unable to register char device %s.\n", QLA2XXX_APIDEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887) qla2xxx_transport_vport_template =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888) fc_attach_transport(&qla2xxx_transport_vport_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889) if (!qla2xxx_transport_vport_template) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890) ql_log(ql_log_fatal, NULL, 0x0004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) "fc_attach_transport vport failed...Failing load!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) goto unreg_chrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895) ql_log(ql_log_info, NULL, 0x0005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896) "QLogic Fibre Channel HBA Driver: %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7897) qla2x00_version_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7898) ret = pci_register_driver(&qla2xxx_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7899) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7900) ql_log(ql_log_fatal, NULL, 0x0006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7901) "pci_register_driver failed...ret=%d Failing load!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) goto release_vport_transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906)
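/* Error unwind: undo the setup steps above in reverse order. */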
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907) release_vport_transport:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908) fc_release_transport(qla2xxx_transport_vport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) unreg_chrdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911) if (apidev_major >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912) unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) fc_release_transport(qla2xxx_transport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) qlt_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) qlt_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918) destroy_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919) kmem_cache_destroy(srb_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7924) * qla2x00_module_exit - Module cleanup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926) static void __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927) qla2x00_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928) {
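/*
 * Tear down in roughly the reverse order of qla2x00_module_init().
 * ctx_cachep is only created on demand after module load, so it may
 * still be NULL here; kmem_cache_destroy() accepts NULL.
 */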
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929) pci_unregister_driver(&qla2xxx_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930) qla2x00_release_firmware();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931) kmem_cache_destroy(ctx_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932) fc_release_transport(qla2xxx_transport_vport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933) if (apidev_major >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935) fc_release_transport(qla2xxx_transport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) qlt_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937) kmem_cache_destroy(srb_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940) module_init(qla2x00_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) module_exit(qla2x00_module_exit);
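/*
 * Illustrative usage (assumed invocation, not part of the driver): the
 * module parameters declared in this file can be set at load time, e.g.
 *
 *   modprobe qla2xxx ql2xextended_error_logging=1
 *
 * which selects the default extended-logging mask and appends "-debug"
 * to the reported driver version string.
 */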
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7943) MODULE_AUTHOR("QLogic Corporation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7944) MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946) MODULE_FIRMWARE(FW_FILE_ISP21XX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947) MODULE_FIRMWARE(FW_FILE_ISP22XX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7948) MODULE_FIRMWARE(FW_FILE_ISP2300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7949) MODULE_FIRMWARE(FW_FILE_ISP2322);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7950) MODULE_FIRMWARE(FW_FILE_ISP24XX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7951) MODULE_FIRMWARE(FW_FILE_ISP25XX);