Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "snic_io.h"
#include "snic.h"

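/*
 * snic_cmd_tag() : per-command tag taken from the block-layer request.
 * It is sent to firmware as the command id, matched back on completion via
 * scsi_host_find_tag(), and used as the key for the hashed per-IO locks.
 */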
#define snic_cmd_tag(sc)	(((struct scsi_cmnd *) sc)->request->tag)

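/*
 * State/status name tables below are indexed by the corresponding enum
 * values; slots without a name stay NULL, which is why the *_to_str()
 * helpers check both the array bound and the entry before using it.
 */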
const char *snic_state_str[] = {
	[SNIC_INIT]	= "SNIC_INIT",
	[SNIC_ERROR]	= "SNIC_ERROR",
	[SNIC_ONLINE]	= "SNIC_ONLINE",
	[SNIC_OFFLINE]	= "SNIC_OFFLINE",
	[SNIC_FWRESET]	= "SNIC_FWRESET",
};

static const char * const snic_req_state_str[] = {
	[SNIC_IOREQ_NOT_INITED]	= "SNIC_IOREQ_NOT_INITED",
	[SNIC_IOREQ_PENDING]	= "SNIC_IOREQ_PENDING",
	[SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
	[SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
	[SNIC_IOREQ_LR_PENDING]	= "SNIC_IOREQ_LR_PENDING",
	[SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
	[SNIC_IOREQ_COMPLETE]	= "SNIC_IOREQ_CMD_COMPLETE",
};

/* snic cmd status strings */
static const char * const snic_io_status_str[] = {
	[SNIC_STAT_IO_SUCCESS]	= "SNIC_STAT_IO_SUCCESS", /* 0x0 */
	[SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
	[SNIC_STAT_OUT_OF_RES]	= "SNIC_STAT_OUT_OF_RES",
	[SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
	[SNIC_STAT_REQ_NOT_SUP]	= "SNIC_STAT_REQ_NOT_SUP",
	[SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
	[SNIC_STAT_ABORTED]	= "SNIC_STAT_ABORTED",
	[SNIC_STAT_TIMEOUT]	= "SNIC_STAT_TIMEOUT",
	[SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
	[SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
	[SNIC_STAT_FW_ERR]	= "SNIC_STAT_FW_ERR",
	[SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
	[SNIC_STAT_ITMF_FAIL]	= "SNIC_STAT_ITMF_FAIL",
	[SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
	[SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
	[SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
	[SNIC_STAT_NO_BOOTLUN]	= "SNIC_STAT_NO_BOOTLUN",
	[SNIC_STAT_SCSI_ERR]	= "SNIC_STAT_SCSI_ERR",
	[SNIC_STAT_NOT_READY]	= "SNIC_STAT_NOT_READY",
	[SNIC_STAT_FATAL_ERROR]	= "SNIC_STAT_FATAL_ERROR",
};

static void snic_scsi_cleanup(struct snic *, int);

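/*
 * snic_state_to_str/snic_io_status_to_str/snic_ioreq_state_to_str :
 * Convert numeric state/status values into printable names for logging;
 * out-of-range or unnamed values fall back to "Unknown".
 */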
const char *
snic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state])
		return "Unknown";

	return snic_state_str[state];
}

static const char *
snic_io_status_to_str(unsigned int state)
{
	if ((state >= ARRAY_SIZE(snic_io_status_str)) ||
	     (!snic_io_status_str[state]))
		return "Unknown";

	return snic_io_status_str[state];
}

static const char *
snic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_req_state_str) ||
			!snic_req_state_str[state])
		return "Unknown";

	return snic_req_state_str[state];
}

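/*
 * Per-IO locking: the command tag is hashed into SNIC_IO_LOCKS buckets so
 * that submission and completion paths for the same tag always take the
 * same spinlock. snic_io_lock_hash() and snic_io_lock_tag() must stay in
 * agreement.
 */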
static inline spinlock_t *
snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
{
	u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);

	return &snic->io_req_lock[hash];
}

static inline spinlock_t *
snic_io_lock_tag(struct snic *snic, int tag)
{
	return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
}

/* snic_release_req_buf : Releases snic_req_info */
static void
snic_release_req_buf(struct snic *snic,
		   struct snic_req_info *rqi,
		   struct scsi_cmnd *sc)
{
	struct snic_host_req *req = rqi_to_req(rqi);

	/* Freeing cmd without marking completion, not okay */
	SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
		      (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));

	SNIC_SCSI_DBG(snic->shost,
		      "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
		      sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
		      rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
		      CMD_FLAGS(sc));

	if (req->u.icmnd.sense_addr)
		dma_unmap_single(&snic->pdev->dev,
				 le64_to_cpu(req->u.icmnd.sense_addr),
				 SCSI_SENSE_BUFFERSIZE,
				 DMA_FROM_DEVICE);

	scsi_dma_unmap(sc);

	snic_req_free(snic, rqi);
} /* end of snic_release_req_buf */

/*
 * snic_queue_icmnd_req : Queues snic_icmnd request
 */
static int
snic_queue_icmnd_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int sg_cnt)
{
	struct scatterlist *sg;
	struct snic_sg_desc *sgd;
	dma_addr_t pa = 0;
	struct scsi_lun lun;
	u16 flags = 0;
	int ret = 0;
	unsigned int i;

	if (sg_cnt) {
		flags = SNIC_ICMND_ESGL;
		sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);

		for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
			sgd->addr = cpu_to_le64(sg_dma_address(sg));
			sgd->len = cpu_to_le32(sg_dma_len(sg));
			sgd->_resvd = 0;
			sgd++;
		}
	}

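	/*
	 * Map the midlayer sense buffer for device access so the firmware
	 * can deposit sense data into it directly; the mapping is torn
	 * down in snic_release_req_buf().
	 */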
	pa = dma_map_single(&snic->pdev->dev,
			    sc->sense_buffer,
			    SCSI_SENSE_BUFFERSIZE,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
			      sc->sense_buffer, snic_cmd_tag(sc));
		ret = -ENOMEM;

		return ret;
	}

	int_to_scsilun(sc->device->lun, &lun);
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags |= SNIC_ICMND_RD;
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags |= SNIC_ICMND_WR;

	/* Initialize icmnd */
	snic_icmnd_init(rqi->req,
			snic_cmd_tag(sc),
			snic->config.hid, /* hid */
			(ulong) rqi,
			flags, /* command flags */
			rqi->tgt_id,
			lun.scsi_lun,
			sc->cmnd,
			sc->cmd_len,
			scsi_bufflen(sc),
			sg_cnt,
			(ulong) req_to_sgl(rqi->req),
			pa, /* sense buffer pa */
			SCSI_SENSE_BUFFERSIZE);

	atomic64_inc(&snic->s_stats.io.active);
	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		atomic64_dec(&snic->s_stats.io.active);
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
			      ret);
	} else
		snic_stats_update_active_ios(&snic->s_stats);

	return ret;
} /* end of snic_queue_icmnd_req */

/*
 * snic_issue_scsi_req : Prepares IO request and Issues to FW.
 */
static int
snic_issue_scsi_req(struct snic *snic,
		      struct snic_tgt *tgt,
		      struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	int sg_cnt = 0;
	int ret = 0;
	u32 tag = snic_cmd_tag(sc);
	u64 cmd_trc = 0, cmd_st_flags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;

	CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = SNIC_NO_FLAGS;
	sg_cnt = scsi_dma_map(sc);
	if (sg_cnt < 0) {
		SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
			 sc->cmnd[0], sg_cnt, CMD_STATE(sc));

		SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi = snic_req_init(snic, sg_cnt);
	if (!rqi) {
		scsi_dma_unmap(sc);
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi->tgt_id = tgt->id;
	rqi->sc = sc;

	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	cmd_trc = SNIC_TRC_CMD(sc);
	CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
	cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
	io_lock = snic_io_lock_hash(snic, sc);

	/* create wq desc and enqueue it */
	ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issue_sc: icmnd qing Failed for sc %p, err %d\n",
			      sc, ret);

		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
		CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
		spin_unlock_irqrestore(io_lock, flags);

		if (rqi)
			snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
			 SNIC_TRC_CMD_STATE_FLAGS(sc));
	} else {
		u32 io_sz = scsi_bufflen(sc) >> 9;
		u32 qtime = jiffies - rqi->start_time;
		struct snic_io_stats *iostats = &snic->s_stats.io;

		if (io_sz > atomic64_read(&iostats->max_io_sz))
			atomic64_set(&iostats->max_io_sz, io_sz);

		if (qtime > atomic64_read(&iostats->max_qtime))
			atomic64_set(&iostats->max_qtime, qtime);

		SNIC_SCSI_DBG(snic->shost,
			      "issue_sc:sc %p, tag %d queued to WQ.\n",
			      sc, tag);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
			 sg_cnt, cmd_trc, cmd_st_flags);
	}

issue_sc_end:

	return ret;
} /* end of snic_issue_scsi_req */


/*
 * snic_queuecommand
 * Routine to send a scsi cdb to LLD
 * Called with host_lock held and interrupts disabled
 */
int
snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic_tgt *tgt = NULL;
	struct snic *snic = shost_priv(shost);
	int ret;

	tgt = starget_to_tgt(scsi_target(sc->device));
	ret = snic_tgt_chkready(tgt);
	if (ret) {
		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
		atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
		sc->result = ret;
		sc->scsi_done(sc);

		return 0;
	}

	if (snic_get_state(snic) != SNIC_ONLINE) {
		SNIC_HOST_ERR(shost, "snic state is %s\n",
			      snic_state_str[snic_get_state(snic)]);

		return SCSI_MLQUEUE_HOST_BUSY;
	}
	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
		      sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);

	ret = snic_issue_scsi_req(snic, tgt, sc);
	if (ret) {
		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	atomic_dec(&snic->ios_inflight);

	return ret;
} /* end of snic_queuecommand */

/*
 * snic_proc_tmreq_pending_state:
 * caller should hold IO lock
 */
static void
snic_proc_tmreq_pending_state(struct snic *snic,
			      struct scsi_cmnd *sc,
			      u8 cmpl_status)
{
	int state = CMD_STATE(sc);

	if (state == SNIC_IOREQ_ABTS_PENDING)
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
	else if (state == SNIC_IOREQ_LR_PENDING)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
	else
		SNIC_BUG_ON(1);

	switch (cmpl_status) {
	case SNIC_STAT_IO_SUCCESS:
		CMD_FLAGS(sc) |= SNIC_IO_DONE;
		break;

	case SNIC_STAT_ABORTED:
		CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
		break;

	default:
		SNIC_BUG_ON(1);
	}
}

/*
 * snic_process_io_failed_state:
 * Processes IO's error states
 */
static void
snic_process_io_failed_state(struct snic *snic,
			     struct snic_icmnd_cmpl *icmnd_cmpl,
			     struct scsi_cmnd *sc,
			     u8 cmpl_stat)
{
	int res = 0;

	switch (cmpl_stat) {
	case SNIC_STAT_TIMEOUT:		/* Req was timedout */
		atomic64_inc(&snic->s_stats.misc.io_tmo);
		res = DID_TIME_OUT;
		break;

	case SNIC_STAT_ABORTED:		/* Req was aborted */
		atomic64_inc(&snic->s_stats.misc.io_aborted);
		res = DID_ABORT;
		break;

	case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */
		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		res = DID_ERROR;
		break;

	case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
		atomic64_inc(&snic->s_stats.fw.out_of_res);
		res = DID_REQUEUE;
		break;

	case SNIC_STAT_IO_NOT_FOUND:	/* Requested I/O was not found */
		atomic64_inc(&snic->s_stats.io.io_not_found);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SGL_INVALID:	/* Req was aborted due to sgl error */
		atomic64_inc(&snic->s_stats.misc.sgl_inval);
		res = DID_ERROR;
		break;

	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
		atomic64_inc(&snic->s_stats.fw.io_errs);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
		atomic64_inc(&snic->s_stats.fw.scsi_errs);
		break;

	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
		res = DID_NO_CONNECT;
		break;

	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
	case SNIC_STAT_INVALID_PARM:	/* Some param in req is invalid */
	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
	default:
		SNIC_SCSI_DBG(snic->shost,
			      "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline or Unknown\n");
		res = DID_ERROR;
		break;
	}

	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));

	/* Set sc->result */
	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
} /* end of snic_process_io_failed_state */

/*
 * snic_tmreq_pending : is task management in progress.
 */
static int
snic_tmreq_pending(struct scsi_cmnd *sc)
{
	int state = CMD_STATE(sc);

	return ((state == SNIC_IOREQ_ABTS_PENDING) ||
			(state == SNIC_IOREQ_LR_PENDING));
}

/*
 * snic_process_icmnd_cmpl_status:
 * Caller should hold io_lock
 */
static int
snic_process_icmnd_cmpl_status(struct snic *snic,
			       struct snic_icmnd_cmpl *icmnd_cmpl,
			       u8 cmpl_stat,
			       struct scsi_cmnd *sc)
{
	u8 scsi_stat = icmnd_cmpl->scsi_status;
	u64 xfer_len = 0;
	int ret = 0;

	/* Mark the IO as complete */
	CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;

	if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
		sc->result = (DID_OK << 16) | scsi_stat;

		xfer_len = scsi_bufflen(sc);

		/* Update SCSI Cmd with resid value */
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));

		if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
			xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
			atomic64_inc(&snic->s_stats.misc.io_under_run);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&snic->s_stats.misc.qfull);

		ret = 0;
	} else {
		snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
		atomic64_inc(&snic->s_stats.io.fail);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
		ret = 1;
	}

	return ret;
} /* end of snic_process_icmnd_cmpl_status */


/*
 * snic_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void
snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct scsi_cmnd *sc = NULL;
	struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
	struct snic_host_req *req = NULL;
	struct snic_req_info *rqi = NULL;
	unsigned long flags, start_time;
	spinlock_t *io_lock;
	u8 sc_stat = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	icmnd_cmpl = &fwreq->u.icmnd_cmpl;
	sc_stat = icmnd_cmpl->scsi_status;

	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
	WARN_ON_ONCE(!sc);

	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id,
			      fwreq);

		SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
			 ((u64)hdr_stat << 16 |
			  (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
			 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);

		return;
	}

	io_lock = snic_io_lock_hash(snic, sc);

	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl:lun %lld sc %p cmd %x tag %d flags 0x%llx rqi %p\n",
		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
		      CMD_FLAGS(sc), rqi);

	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
		spin_unlock_irqrestore(io_lock, flags);

		return;
	}

	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
	WARN_ON_ONCE(req);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id, sc, CMD_FLAGS(sc));
		return;
	}

	rqi = (struct snic_req_info *) ctx;
	start_time = rqi->start_time;

	/* firmware completed the io */
	rqi->io_cmpl = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (unlikely(snic_tmreq_pending(sc))) {
		snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
		spin_unlock_irqrestore(io_lock, flags);

		snic_stats_update_io_cmpl(&snic->s_stats);

		/* Expected value is SNIC_STAT_ABORTED */
		if (likely(hdr_stat == SNIC_STAT_ABORTED))
			return;

		SNIC_SCSI_DBG(snic->shost,
			      "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      snic_io_status_to_str(hdr_stat),
			      sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
			      CMD_FLAGS(sc));

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
			 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

		return;
	}

	if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
		scsi_print_command(sc);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
			      sc, cmnd_id, sc->cmnd[0],
			      snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
	}

	/* Break link with the SCSI Command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= SNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	/* For now, consider only successful IO. */
	snic_calc_io_process_time(snic, rqi);

	snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));


	if (sc->scsi_done)
		sc->scsi_done(sc);

	snic_stats_update_io_cmpl(&snic->s_stats);
} /* end of snic_icmnd_cmpl_handler */

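/*
 * snic_proc_dr_cmpl_locked : Handles device-reset (LR) completion for a
 * command. Caller should hold the IO lock; wakes up any waiter on
 * rqi->dr_done once the reset completes.
 */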
static void
snic_proc_dr_cmpl_locked(struct snic *snic,
			 struct snic_fw_req *fwreq,
			 u8 cmpl_stat,
			 u32 cmnd_id,
			 struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
	u32 start_time = rqi->start_time;

	CMD_LR_STATUS(sc) = cmpl_stat;

	SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}


	if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}

	CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
	CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

	SNIC_SCSI_DBG(snic->shost,
		      "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
		      (int)(cmnd_id & SNIC_TAG_MASK),
		      snic_io_status_to_str(cmpl_stat),
		      CMD_FLAGS(sc));

	if (rqi->dr_done)
		complete(rqi->dr_done);
} /* end of snic_proc_dr_cmpl_locked */

/*
 * snic_update_abort_stats : Updates abort stats based on completion status.
 */
static void
snic_update_abort_stats(struct snic *snic, u8 cmpl_stat)
{
	struct snic_abort_stats *abt_stats = &snic->s_stats.abts;

	SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n");

	switch (cmpl_stat) {
	case SNIC_STAT_IO_SUCCESS:
		break;

	case SNIC_STAT_TIMEOUT:
		atomic64_inc(&abt_stats->fw_tmo);
		break;

	case SNIC_STAT_IO_NOT_FOUND:
		atomic64_inc(&abt_stats->io_not_found);
		break;

	default:
		atomic64_inc(&abt_stats->fail);
		break;
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) snic_process_itmf_cmpl(struct snic *snic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		       struct snic_fw_req *fwreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		       u32 cmnd_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		       u8 cmpl_stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		       struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	u32 tm_tags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	u32 start_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
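	/* Look up the hash-based per-command IO lock before touching the command state */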
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	WARN_ON_ONCE(!rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		atomic64_inc(&snic->s_stats.io.req_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			      "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			      snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			      CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	/* Extract task management flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	tm_tags = cmnd_id & ~(SNIC_TAG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	start_time = rqi->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	cmnd_id &= (SNIC_TAG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
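	/* Dispatch on the task-management tag type carried in the upper bits of cmnd_id */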
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	switch (tm_tags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	case SNIC_TAG_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		/* Abort only issued on cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		snic_update_abort_stats(snic, cmpl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			/* This is a late completion. Ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		CMD_ABTS_STATUS(sc) = cmpl_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			      "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			      cmnd_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			      snic_io_status_to_str(cmpl_stat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			      CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		 * If the scsi_eh thread is blocked waiting for the abort to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		 * complete, signal completion to it; the IO is then cleaned up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		 * in that thread. Otherwise clean it up in this context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		if (rqi->abts_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			complete(rqi->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 			break; /* jump out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		sc->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			      "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			      sc, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		snic_release_req_buf(snic, rqi, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		if (sc->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 				 jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 				 (ulong) fwreq, SNIC_TRC_CMD(sc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 				 SNIC_TRC_CMD_STATE_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 			sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	case SNIC_TAG_DEV_RST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		/* Completion of abort/terminate issued on a device reset req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		CMD_ABTS_STATUS(sc) = cmpl_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			      "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			      cmnd_id, snic_io_status_to_str(cmpl_stat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			      CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		if (rqi->abts_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			complete(rqi->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			      "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			      "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			      snic_ioreq_state_to_str(CMD_STATE(sc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			      cmnd_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			      CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		SNIC_BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) } /* end of snic_process_itmf_cmpl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916)  * snic_itmf_cmpl_handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917)  * Routine to handle itmf completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	struct scsi_cmnd  *sc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	struct snic_itmf_cmpl *itmf_cmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	ulong ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	u32 cmnd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	u32 hid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	u8 typ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	u8 hdr_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		      "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,ctx = %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		      __func__, typ, hdr_stat, cmnd_id, hid, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	itmf_cmpl = &fwreq->u.itmf_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		      "Itmf_cmpl: nterm %u , flags 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		      le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	/* special case: device reset issued through ioctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		rqi = (struct snic_req_info *) ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		sc = rqi->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		goto ioctl_dev_rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			      "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			      cmnd_id, snic_io_status_to_str(hdr_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		SNIC_BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	WARN_ON_ONCE(!sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
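	/* In the ioctl device-reset path, sc comes from the request context rather than a tag lookup */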
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) ioctl_dev_rst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		atomic64_inc(&snic->s_stats.io.sc_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			      "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			      snic_io_status_to_str(hdr_stat), cmnd_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) } /* end of snic_itmf_cmpl_handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	struct snic_stats *st = &snic->s_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	long act_ios = 0, act_fwreqs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	snic_scsi_cleanup(snic, snic_cmd_tag(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	/* Update stats on pending IOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	act_ios = atomic64_read(&st->io.active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	atomic64_add(act_ios, &st->io.compl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	atomic64_sub(act_ios, &st->io.active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	act_fwreqs = atomic64_read(&st->fw.actv_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	atomic64_sub(act_fwreqs, &st->fw.actv_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * snic_hba_reset_cmpl_handler :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * Notes :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * 1. Clean up all the scsi cmds and release all snic-specific cmds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  * 2. Issue Report Targets in case of SAN targets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	ulong ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	u32 cmnd_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	u32 hid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	u8 typ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	u8 hdr_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	struct scsi_cmnd *sc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	unsigned long flags, gflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	SNIC_HOST_INFO(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		       "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		      "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		      typ, hdr_stat, cmnd_id, hid, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/* special case: host reset issued through ioctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (cmnd_id == SCSI_NO_TAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		rqi = (struct snic_req_info *) ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		SNIC_HOST_INFO(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			       "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		sc = rqi->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		goto ioctl_hba_rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (cmnd_id >= snic->max_tag_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			      "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			      cmnd_id, snic_io_status_to_str(hdr_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		SNIC_BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	sc = scsi_host_find_tag(snic->shost, cmnd_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ioctl_hba_rst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		atomic64_inc(&snic->s_stats.io.sc_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			      "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			      snic_io_status_to_str(hdr_stat), cmnd_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	SNIC_HOST_INFO(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		       "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		       sc, rqi, cmnd_id, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (!snic->remove_wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			      "reset_cmpl:host reset completed after timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	WARN_ON_ONCE(!rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		atomic64_inc(&snic->s_stats.io.req_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			      "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			      snic_io_status_to_str(hdr_stat), cmnd_id, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			      CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	/* stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	/* scsi cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	snic_hba_reset_scsi_cleanup(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		    snic_get_state(snic) != SNIC_FWRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	/* Careful locking between snic_lock and io lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	spin_lock_irqsave(&snic->snic_lock, gflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (snic_get_state(snic) == SNIC_FWRESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		snic_set_state(snic, SNIC_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	spin_unlock_irqrestore(&snic->snic_lock, gflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
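	/* Wake up any waiter blocked on this HBA reset completing */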
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	if (snic->remove_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		complete(snic->remove_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	/* Rediscovery is only needed for SAN; DAS targets skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	if (snic->config.xpt_type == SNIC_DAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	queue_work(snic_glob->event_q, &snic->disc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	SNIC_ASSERT_NOT_IMPL(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	u8 typ, hdr_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	u32 cmnd_id, hid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	ulong ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	struct snic_async_evnotify *aen = &fwreq->u.async_ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	u32 event_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		      "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		      typ, hdr_stat, cmnd_id, hid, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	event_id = le32_to_cpu(aen->ev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	switch (event_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	case SNIC_EV_TGT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	case SNIC_EV_TGT_ONLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	case SNIC_EV_LUN_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	case SNIC_EV_LUN_ONLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	case SNIC_EV_CONF_CHG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	case SNIC_EV_TGT_ADDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	case SNIC_EV_TGT_DELTD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	case SNIC_EV_LUN_ADDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	case SNIC_EV_LUN_DELTD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	case SNIC_EV_DISC_CMPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		SNIC_BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	SNIC_ASSERT_NOT_IMPL(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) } /* end of snic_aen_handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  * snic_io_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)  * Routine to process CQ entries (IO Completions) posted by fw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) snic_io_cmpl_handler(struct vnic_dev *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		     unsigned int cq_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		     struct snic_fw_req *fwreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	struct snic *snic = svnic_dev_priv(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	u64 start = jiffies, cmpl_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	/* Update FW Stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		(fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		atomic64_dec(&snic->s_stats.fw.actv_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
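	/* Completion types between the last response type and async-event notify are invalid */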
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		    (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	/* Check for snic subsys errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	switch (fwreq->hdr.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			      "sNIC SubSystem is NOT Ready.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			      "sNIC SubSystem in Unrecoverable State.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	switch (fwreq->hdr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	case SNIC_RSP_EXCH_VER_CMPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		snic_io_exch_ver_cmpl_handler(snic, fwreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	case SNIC_RSP_REPORT_TGTS_CMPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		snic_report_tgt_cmpl_handler(snic, fwreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	case SNIC_RSP_ICMND_CMPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		snic_icmnd_cmpl_handler(snic, fwreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	case SNIC_RSP_ITMF_CMPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		snic_itmf_cmpl_handler(snic, fwreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	case SNIC_RSP_HBA_RESET_CMPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		snic_hba_reset_cmpl_handler(snic, fwreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	case SNIC_MSG_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		snic_msg_ack_handler(snic, fwreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	case SNIC_MSG_ASYNC_EVNOTIFY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		snic_aen_handler(snic, fwreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		SNIC_BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			      "Unknown Firmware completion request type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			      fwreq->hdr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	/* Update Stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	cmpl_time = jiffies - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) } /* end of snic_io_cmpl_handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  * snic_fwcq_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  * Routine to process fwCQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  * This CQ is independent, and not associated with wq/rq/wq_copy queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	unsigned int num_ent = 0;	/* number cq entries processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	unsigned int cq_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	unsigned int nent_per_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	struct snic_misc_stats *misc_stats = &snic->s_stats.misc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
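	/* Firmware completion queues follow the work queues in the cq array */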
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 						 snic_io_cmpl_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 						 io_cmpl_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		num_ent += nent_per_cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			atomic64_set(&misc_stats->max_cq_ents, nent_per_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	return num_ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) } /* end of snic_fwcq_cmpl_handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * snic_queue_itmf_req: Common API to queue Task Management requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  * Use rqi->tm_tag for passing special tags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  * @req_id : aborted request's tag, -1 for lun reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) snic_queue_itmf_req(struct snic *snic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		    struct snic_host_req *tmreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		    struct scsi_cmnd *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		    u32 tmf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		    u32 req_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	struct snic_req_info *rqi = req_to_rqi(tmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	struct scsi_lun lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	SNIC_BUG_ON(!rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	SNIC_BUG_ON(!rqi->tm_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	/* fill in lun info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	int_to_scsilun(sc->device->lun, &lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	/* Initialize snic_host_req: itmf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	snic_itmf_init(tmreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		       tm_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		       snic->config.hid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		       (ulong) rqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		       0 /* flags */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		       req_id, /* Command to be aborted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		       rqi->tgt_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		       lun.scsi_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		       tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	 * In case of multiple aborts on the same cmd, use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	 * try_wait_for_completion() and completion_done() to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	 * whether an abort gets queued even after a previously issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	 * abort has completed, e.g. SNIC_BUG_ON(completion_done(&rqi->done));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			      tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			      tmf, sc, rqi, req_id, snic_cmd_tag(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) } /* end of snic_queue_itmf_req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) snic_issue_tm_req(struct snic *snic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		    struct snic_req_info *rqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		    struct scsi_cmnd *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		    int tmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	struct snic_host_req *tmreq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	int req_id = 0, tag = snic_cmd_tag(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	if (snic_get_state(snic) == SNIC_FWRESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	atomic_inc(&snic->ios_inflight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		      "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		      tmf, rqi, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
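	/* LUN reset uses the reserved SCSI_NO_TAG as req_id; an abort targets the command's own tag */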
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	if (tmf == SNIC_ITMF_LUN_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		tmreq = snic_dr_req_init(snic, rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		req_id = SCSI_NO_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		tmreq = snic_abort_req_init(snic, rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		req_id = tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (!tmreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		goto tmreq_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) tmreq_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			      tmf, sc, rqi, req_id, tag, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			      tmf, sc, rqi, req_id, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	atomic_dec(&snic->ios_inflight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  * snic_queue_abort_req : Queues abort req to WQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) snic_queue_abort_req(struct snic *snic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		     struct snic_req_info *rqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		     struct scsi_cmnd *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		     int tmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		      sc, rqi, snic_cmd_tag(sc), tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	/* Add special tag for abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	rqi->tm_tag |= SNIC_TAG_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	return snic_issue_tm_req(snic, rqi, sc, tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  * snic_abort_finish : called by snic_abort_cmd after the abort is queued successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	int ret = 0, tag = snic_cmd_tag(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		atomic64_inc(&snic->s_stats.io.req_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			      "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			      tag, sc, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		goto abort_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
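	/* Detach the on-stack completion set up by the abort issuer before checking the abort status */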
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	rqi->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	/* Check the abort status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	switch (CMD_ABTS_STATUS(sc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	case SNIC_INVALID_CODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		/* Firmware didn't complete the abort req; it timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		atomic64_inc(&snic->s_stats.abts.drv_tmo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			      "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		/* do not release the snic request in the timed-out case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		goto abort_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	case SNIC_STAT_IO_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	case SNIC_STAT_IO_NOT_FOUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		 * If the abort path doesn't call scsi_done(), the LUN is taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		 * offline once the number of IO timeouts reaches 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		 * Call scsi_done() to complete the IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		sc->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		/* Firmware completed abort with error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	SNIC_HOST_INFO(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		       "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		       tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		       CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) abort_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		snic_release_req_buf(snic, rqi, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) } /* end of snic_abort_finish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  * snic_send_abort_and_wait : Issues the abort and waits for its completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	enum snic_ioreq_state sv_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	struct snic_tgt *tgt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	DECLARE_COMPLETION_ONSTACK(tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
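	/* Pick the TMF: task-terminate for SAN targets failing the readiness check, plain abort otherwise */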
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	tgt = starget_to_tgt(scsi_target(sc->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		tmf = SNIC_ITMF_ABTS_TASK_TERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		tmf = SNIC_ITMF_ABTS_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	/* stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	 * Avoid a race between SCSI issuing the abort and the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	 * completing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	 * If the command is already completed by fw_cmpl code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	 * we just return SUCCESS from here. This means that the abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	 * succeeded. In the SCSI ML, since the timeout for the command has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	 * happened, the completion won't actually complete the command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	 * and it will be considered an aborted command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	 * The CMD_SP will not be cleared except while holding io_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			      "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			      tag, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		goto send_abts_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	rqi->abts_done = &tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		goto abts_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	SNIC_BUG_ON(!rqi->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	/* Save command state; it should be restored if queuing the abort fails. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	sv_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	 * Command is still pending, so we need to abort it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	 * If the fw completes the command after this point,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	 * the completion won't be passed up to the mid-layer, since the abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	 * has already started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	/* Now Queue the abort command to firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		atomic64_inc(&snic->s_stats.abts.q_fail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			      tag, ret, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		/* Restore Command's previous state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		CMD_STATE(sc) = sv_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			rqi->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		goto send_abts_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	if (tmf == SNIC_ITMF_ABTS_TASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		atomic64_inc(&snic->s_stats.abts.num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		/* term stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		      "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		      sc, tag, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) abts_pending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	 * Queued an abort IO, wait for its completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	 * Once the fw completes the abort command, it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 * wakeup this thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) send_abts_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) } /* end of snic_send_abort_and_wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  * This function is exported to SCSI for sending abort cmnds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  * A SCSI IO is represented by a snic_ioreq in the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  * The snic_ioreq is linked to the SCSI cmd, thus providing a link to the ULP's IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)  */
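/*
 * snic_abort_cmd() is the driver's SCSI-EH abort entry point (registered as
 * the .eh_abort_handler in the driver's scsi_host_template). It bails out
 * with FAST_IO_FAIL if the snic is not online, otherwise issues the abort
 * via snic_send_abort_and_wait() and finalizes it with snic_abort_finish().
 */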
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) snic_abort_cmd(struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	struct snic *snic = shost_priv(sc->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	int ret = SUCCESS, tag = snic_cmd_tag(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	u32 start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		       sc, sc->cmnd[0], sc->request, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			      "abt_cmd: tag %x Parent Devs are not rdy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 			      tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		ret = FAST_IO_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		goto abort_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	ret = snic_send_abort_and_wait(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		goto abort_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	ret = snic_abort_finish(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) abort_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		 jiffies_to_msecs(jiffies - start_time), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		      "abts: Abort Req Status = %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		      (ret == SUCCESS) ? "SUCCESS" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		       ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
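/*
 * snic_is_abts_pending : Walks the tag map under each per-tag io_lock and
 * reports whether any IO is still in SNIC_IOREQ_ABTS_PENDING state, i.e.
 * still waiting for firmware to complete its abort. When lr_sc is non-NULL,
 * only IOs on lr_sc's device (excluding lr_sc itself) are considered.
 * Returns 1 if such an IO is found, 0 otherwise.
 */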
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	struct scsi_cmnd *sc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	struct scsi_device *lr_sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	u32 tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	if (lr_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		lr_sdev = lr_sc->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	/* walk through the tag map and check if IOs are still pending in fw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	for (tag = 0; tag < snic->max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		io_lock = snic_io_lock_tag(snic, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		sc = scsi_host_find_tag(snic->shost, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		 * Found IO that is still pending w/ firmware and belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		 * the LUN that is under reset, if lr_sc != NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 			      snic_ioreq_state_to_str(CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) } /* end of snic_is_abts_pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
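/*
 * snic_dr_clean_single_req : Issues an internal abort/terminate for one
 * outstanding tag on the device under LUN reset and waits up to
 * SNIC_ABTS_TIMEOUT for firmware to complete it. Tags that don't need
 * cleaning are skipped. On a successful clean the command is completed to
 * the mid-layer with DID_ERROR and 0 is returned; 1 is returned if the
 * abort could not be queued or is still pending with firmware.
 */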
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) snic_dr_clean_single_req(struct snic *snic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			 u32 tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			 struct scsi_device *lr_sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	struct snic_tgt *tgt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	struct scsi_cmnd *sc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	u32 sv_state = 0, tmf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	DECLARE_COMPLETION_ONSTACK(tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	io_lock = snic_io_lock_tag(snic, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	sc = scsi_host_find_tag(snic->shost, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	/* Ignore cmds that don't belong to the LUN reset device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	if (!sc || sc->device != lr_sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		goto skip_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	if (!rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		goto skip_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		goto skip_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			(!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 			      "clean_single_req: devrst is not pending sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			      sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		goto skip_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		"clean_single_req: Found IO in %s on lun\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		snic_ioreq_state_to_str(CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	/* Save Command State */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	sv_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	 * Any pending IO issued prior to the reset is expected to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	 * in the abts-pending state; if not, we set SNIC_IOREQ_ABTS_PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	 * to indicate that an abort is pending for the IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	 * When the IO is completed, it will be handed over and handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	 * in this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	SNIC_BUG_ON(rqi->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		rqi->tm_tag = SNIC_TAG_DEV_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			      "clean_single_req:devrst sc 0x%p\n", sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	rqi->abts_done = &tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	tgt = starget_to_tgt(scsi_target(sc->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		tmf = SNIC_ITMF_ABTS_TASK_TERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		tmf = SNIC_ITMF_ABTS_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	/* Now queue the abort command to firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			      "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 			rqi->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		/* Restore Command State */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 			CMD_STATE(sc) = sv_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		goto skip_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	/* Recheck cmd state to see if it has now been aborted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		goto skip_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	rqi->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	/* if abort is still pending w/ fw, fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 			      "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		goto skip_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	snic_release_req_buf(snic, rqi, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	sc->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) skip_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) } /* end of snic_dr_clean_single_req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
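/*
 * snic_dr_clean_pending_req : Cleans every outstanding IO on the LUN-reset
 * device except the LUN reset command itself by calling
 * snic_dr_clean_single_req() for each tag, then re-checks that no aborts
 * are still pending with firmware. Returns 0 on success, FAILED otherwise.
 */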
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	struct scsi_device *lr_sdev = lr_sc->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	u32 tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	for (tag = 0; tag < snic->max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		if (tag == snic_cmd_tag(lr_sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			goto clean_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	schedule_timeout(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	/* Walk through all the cmds and check abts status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	if (snic_is_abts_pending(snic, lr_sc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		goto clean_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) clean_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		      "Failed to Clean Pending IOs on %s device.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		      dev_name(&lr_sdev->sdev_gendev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) } /* end of snic_dr_clean_pending_req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)  * snic_dr_finish : Called by snic_device_reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)  */
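/*
 * Evaluates CMD_LR_STATUS(sc) set by the firmware completion: on timeout
 * (SNIC_INVALID_CODE) or a failed status the reset fails; on
 * SNIC_STAT_IO_SUCCESS all remaining IOs on the LUN are cleaned via
 * snic_dr_clean_pending_req() before the LUN reset command itself is
 * released.
 */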
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	int lr_res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			      "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 			      snic_cmd_tag(sc), sc, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		goto dr_fini_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	rqi->dr_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	lr_res = CMD_LR_STATUS(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	switch (lr_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	case SNIC_INVALID_CODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		/* stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			      "dr_fini: Tag %x Dev Reset Timedout. flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			      snic_cmd_tag(sc), CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		goto dr_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	case SNIC_STAT_IO_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 			      "dr_fini: Tag %x Dev Reset cmpl\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 			      snic_cmd_tag(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 			      "dr_fini: Device Reset completed & failed. Tag = %x lr_status %s flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 			      snic_cmd_tag(sc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			      snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		goto dr_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	 * Cleanup any IOs on this LUN that have still not completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	 * If any of these fail, then LUN Reset fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	 * Cleanup cleans all commands on this LUN except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	 * the lun reset command. If all cmds get cleaned, the LUN Reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	 * succeeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	ret = snic_dr_clean_pending_req(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			      "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			      snic_cmd_tag(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		goto dr_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		/* Cleanup LUN Reset Command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			ret = SUCCESS; /* Completed Successfully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) dr_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	lockdep_assert_held(io_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		snic_release_req_buf(snic, rqi, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) dr_fini_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) } /* end of snic_dr_finish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
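/*
 * snic_queue_dr_req : Tags the request as a device reset and queues a
 * SNIC_ITMF_LUN_RESET task-management request to the firmware.
 */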
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) snic_queue_dr_req(struct snic *snic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		  struct snic_req_info *rqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		  struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	/* Add special tag for device reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	rqi->tm_tag |= SNIC_TAG_DEV_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
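/*
 * snic_send_dr_and_wait : Marks the command with SNIC_DEVICE_RESET, queues
 * the LUN reset TMF via snic_queue_dr_req() and waits up to
 * SNIC_LUN_RESET_TIMEOUT for the firmware to complete it. On a queuing
 * failure the saved command state is restored and FAILED is returned;
 * otherwise 0 is returned after the wait.
 */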
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	enum snic_ioreq_state sv_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	DECLARE_COMPLETION_ONSTACK(tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	int ret = FAILED, tag = snic_cmd_tag(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			      "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			      tag, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		goto send_dr_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	/* Save command state to restore in case queuing fails. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	sv_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	rqi->dr_done = &tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	SNIC_BUG_ON(!rqi->dr_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	 * The command state is changed to IOREQ_PENDING; in this case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	 * if the command is completed, the icmnd_cmpl will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	 * mark the cmd as completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	 * Even so, this logic still makes the LUN Reset inevitable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	ret = snic_queue_dr_req(snic, rqi, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 			      "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 			      tag, ret, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		/* Restore State */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		CMD_STATE(sc) = sv_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			rqi->dr_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		/* rqi is freed in caller. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		goto send_dr_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) send_dr_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  * Auxiliary function to check whether the LUN reset op is supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)  * Returns 0 if it is not supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) snic_dev_reset_supported(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (tgt->tdata.typ == SNIC_TGT_DAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
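/*
 * snic_unlink_and_release_req : Detaches the request from the scsi_cmnd
 * under the io_lock, ORs the given flag into CMD_FLAGS, frees the request
 * buffers and records a trace entry.
 */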
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	u32 start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	if (rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		start_time = rqi->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	CMD_FLAGS(sc) |= flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		snic_release_req_buf(snic, rqi, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		 jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  * SCSI EH thread issues a LUN Reset when one or more commands on a LUN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  * fail to get aborted. It calls driver's eh_device_reset with a SCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)  * command on the LUN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) snic_device_reset(struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	struct Scsi_Host *shost = sc->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	struct snic *snic = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	int tag = snic_cmd_tag(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	int start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	int dr_supp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		      sc, sc->cmnd[0], sc->request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		      snic_cmd_tag(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	dr_supp = snic_dev_reset_supported(sc->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	if (!dr_supp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		/* device reset op is not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		goto dev_rst_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		snic_unlink_and_release_req(snic, sc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		goto dev_rst_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	/* There is no tag when the LUN reset is issued through an ioctl. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	if (unlikely(tag <= SNIC_NO_TAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		SNIC_HOST_INFO(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 			       "Devrst: LUN Reset Recvd thru IOCTL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		rqi = snic_req_init(snic, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		if (!rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 			goto dev_rst_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		memset(scsi_cmd_priv(sc), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			sizeof(struct snic_internal_io_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		CMD_SP(sc) = (char *)rqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		CMD_FLAGS(sc) = SNIC_NO_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		/* Add special tag for device reset coming from user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		rqi->sc = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	ret = snic_send_dr_and_wait(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 			      "Devrst: IO w/ Tag %x Failed w/ err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 			      tag, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		snic_unlink_and_release_req(snic, sc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		goto dev_rst_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	ret = snic_dr_finish(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) dev_rst_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		 jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		      "Devrst: Returning from Device Reset : %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		      (ret == SUCCESS) ? "SUCCESS" : "FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) } /* end of snic_device_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)  * SCSI Error handling calls driver's eh_host_reset if all prior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)  * error handling levels return FAILED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)  * Host Reset is the highest level of error recovery. If this fails, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)  * host is offlined by SCSI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)  * snic_issue_hba_reset : Queues FW Reset Request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)  */
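/*
 * It sends a SNIC_REQ_HBA_RESET request to firmware and waits on
 * snic->remove_wait up to SNIC_HOST_RESET_TIMEOUT; if the snic is still in
 * SNIC_FWRESET state after the wait, the reset is treated as timed out and
 * -ETIMEDOUT is returned.
 */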
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	struct snic_host_req *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	DECLARE_COMPLETION_ONSTACK(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	rqi = snic_req_init(snic, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		goto hba_rst_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		memset(scsi_cmd_priv(sc), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			sizeof(struct snic_internal_io_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		rqi->sc = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	req = rqi_to_req(rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	SNIC_BUG_ON(CMD_SP(sc) != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	CMD_SP(sc) = (char *) rqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	snic->remove_wait = &wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	/* Initialize Request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 			snic->config.hid, 0, (ulong) rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	req->u.reset.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			      "issu_hr:Queuing HBA Reset Failed. w err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			      ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		goto hba_rst_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	atomic64_inc(&snic->s_stats.reset.hba_resets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	wait_for_completion_timeout(snic->remove_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 				    SNIC_HOST_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (snic_get_state(snic) == SNIC_FWRESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		goto hba_rst_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	snic->remove_wait = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		snic_req_free(snic, rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) hba_rst_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	snic->remove_wait = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	if (rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		snic_req_free(snic, rqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) hba_rst_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		      "reset:HBA Reset Failed w/ err = %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		      ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) } /* end of snic_issue_hba_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
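/*
 * snic_reset : Transitions the snic to SNIC_FWRESET (returning SUCCESS
 * after a wait if a previous reset is already in progress), waits for
 * in-flight IOs to drain, and then issues the HBA reset through
 * snic_issue_hba_reset(). The previous state is restored if the HBA reset
 * fails.
 */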
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	struct snic *snic = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	enum snic_state sv_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	/* Set snic state to SNIC_FWRESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	sv_state = snic_get_state(snic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	spin_lock_irqsave(&snic->snic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	if (snic_get_state(snic) == SNIC_FWRESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		spin_unlock_irqrestore(&snic->snic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		SNIC_HOST_INFO(shost, "reset: prev reset is in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		msleep(SNIC_HOST_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		goto reset_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	snic_set_state(snic, SNIC_FWRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	spin_unlock_irqrestore(&snic->snic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/* Wait for the IOs that are currently being issued via queuecommand (Qcmd) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	while (atomic_read(&snic->ios_inflight))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		schedule_timeout(msecs_to_jiffies(1));
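	/*
	 * Note: the task state is left at TASK_RUNNING, so schedule_timeout()
	 * returns almost immediately and the loop above effectively busy-waits
	 * (yielding the CPU each pass) until ios_inflight drops to zero.
	 */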
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	ret = snic_issue_hba_reset(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		SNIC_HOST_ERR(shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			      "reset:Host Reset Failed w/ err %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 			      ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		spin_lock_irqsave(&snic->snic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		snic_set_state(snic, sv_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		spin_unlock_irqrestore(&snic->snic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		goto reset_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) reset_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) } /* end of snic_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)  * SCSI error handling calls the driver's eh_host_reset handler if all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)  * prior error-handling levels return FAILED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)  * Host reset is the highest level of error recovery. If it fails, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)  * host is taken offline by the SCSI midlayer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) snic_host_reset(struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	struct Scsi_Host *shost = sc->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	u32 start_time  = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	SNIC_SCSI_DBG(shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		      "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		      sc, sc->cmnd[0], sc->request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		      snic_cmd_tag(sc), CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	ret = snic_reset(shost, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		 jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) } /* end of snic_host_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)  * snic_cmpl_pending_tmreq : Caller should hold io_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		      "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		      sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	 * Case: FW did not post the itmf completion due to PCIe errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	 * Mark the abort status as success so that SCSI completion runs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	 * in snic_abort_finish().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	if (!rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
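	/* Wake up whichever waiter is pending: device reset first, then abort */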
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	if (rqi->dr_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		complete(rqi->dr_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	else if (rqi->abts_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		complete(rqi->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)  * snic_scsi_cleanup: Walks through tag map and releases the reqs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) snic_scsi_cleanup(struct snic *snic, int ex_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	struct scsi_cmnd *sc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	u64 st_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	for (tag = 0; tag < snic->max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		/* Skip ex_tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		if (tag == ex_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		io_lock = snic_io_lock_tag(snic, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		sc = scsi_host_find_tag(snic->shost, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		if (unlikely(snic_tmreq_pending(sc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			 * FW completed the reset without sending completions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			 * for the outstanding IOs; finish the pending TM request here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 			snic_cmpl_pending_tmreq(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		if (!rqi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 			      "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 			      sc, rqi, tag, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		st_time = rqi->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		SNIC_HOST_INFO(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 			       "sc_clean: Releasing rqi %p : flags 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 			       rqi, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		snic_release_req_buf(snic, rqi, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) cleanup:
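		/*
		 * rqi may be NULL here (reached via the !rqi case above); then
		 * st_time is 0 and the duration logged below is not meaningful.
		 */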
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		sc->result = DID_TRANSPORT_DISRUPTED << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		SNIC_HOST_INFO(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			       sc, sc->request->tag, CMD_FLAGS(sc), rqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 			       jiffies_to_msecs(jiffies - st_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		/* Update IO stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		snic_stats_update_io_cmpl(&snic->s_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		if (sc->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 			SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 				 jiffies_to_msecs(jiffies - st_time), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 				 SNIC_TRC_CMD(sc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 				 SNIC_TRC_CMD_STATE_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 			sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) } /* end of snic_scsi_cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) snic_shutdown_scsi_cleanup(struct snic *snic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
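	/* Passing SCSI_NO_TAG as ex_tag: no tag is excluded from the cleanup */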
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	snic_scsi_cleanup(snic, SCSI_NO_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) } /* end of snic_shutdown_scsi_cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)  * snic_internal_abort_io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)  * called by : snic_tgt_scsi_abort_io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	struct snic_req_info *rqi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	u32 sv_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	io_lock = snic_io_lock_hash(snic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	rqi = (struct snic_req_info *) CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	if (!rqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		goto skip_internal_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		goto skip_internal_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		(!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 			      "internal_abts: dev rst not pending sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 			      sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		goto skip_internal_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		SNIC_SCSI_DBG(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			"internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 			sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		goto skip_internal_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	sv_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		/* stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		rqi->tm_tag = SNIC_TAG_DEV_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		      snic_cmd_tag(sc));
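	/* No other abort should be in flight for this request (abts_done unset) */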
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	SNIC_BUG_ON(rqi->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 			      "internal_abts: Tag = %x , Failed w/ err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 			      snic_cmd_tag(sc), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 			CMD_STATE(sc) = sv_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		goto skip_internal_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) skip_internal_abts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	lockdep_assert_held(io_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) } /* end of snic_internal_abort_io */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)  * snic_tgt_scsi_abort_io : called by snic_tgt_del
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	struct snic *snic = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	struct scsi_cmnd *sc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	struct snic_tgt *sc_tgt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	int ret = 0, tag, abt_cnt = 0, tmf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	if (!tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	snic = shost_priv(snic_tgt_to_shost(tgt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
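	/*
	 * DAS targets use a plain abort-task TMF; other target types use
	 * the abort-task-and-terminate variant.
	 */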
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	if (tgt->tdata.typ == SNIC_TGT_DAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		tmf = SNIC_ITMF_ABTS_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		tmf = SNIC_ITMF_ABTS_TASK_TERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	for (tag = 0; tag < snic->max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		io_lock = snic_io_lock_tag(snic, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 		spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		sc = scsi_host_find_tag(snic->shost, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		sc_tgt = starget_to_tgt(scsi_target(sc->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		if (sc_tgt != tgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 			spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		spin_unlock_irqrestore(io_lock, flags);
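		/*
		 * io_lock is dropped here; snic_internal_abort_io() re-takes
		 * it and revalidates the command before issuing the abort.
		 */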
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		ret = snic_internal_abort_io(snic, sc, tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 			SNIC_HOST_ERR(snic->shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 				      "tgt_abt_io: Tag %x, Failed w/ err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 				      tag, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 		if (ret == SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 			abt_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) } /* end of snic_tgt_scsi_abort_io */