^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright 2008 Cisco Systems, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright 2007 Nuova Systems, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * This program is free software; you may redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * it under the terms of the GNU General Public License as published by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * the Free Software Foundation; version 2 of the License.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/mempool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/if_ether.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <scsi/scsi_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <scsi/fc/fc_els.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <scsi/fc/fc_fcoe.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <scsi/libfc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <scsi/fc_frame.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "fnic_io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include "fnic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
/* Printable names for the fnic FC/Eth mode state machine (fnic_state_to_str) */
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
/* Printable names for per-I/O request states (fnic_ioreq_state_to_str) */
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static const char *fcpio_status_str[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) const char *fnic_state_to_str(unsigned int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return fnic_state_str[state];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static const char *fnic_ioreq_state_to_str(unsigned int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) !fnic_ioreq_state_str[state])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return fnic_ioreq_state_str[state];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) static const char *fnic_fcpio_status_to_str(unsigned int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return fcpio_status_str[status];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) return &fnic->io_req_lock[hash];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * Unmap the data buffer and sense buffer for an io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * also unmap and free the device-private scatter/gather list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static void fnic_release_ioreq_buf(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct fnic_io_req *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (io_req->sgl_list_pa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) scsi_dma_unmap(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) if (io_req->sgl_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) mempool_free(io_req->sgl_list_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) fnic->io_sgl_pool[io_req->sgl_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) if (io_req->sense_buf_pa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /* Free up Copy Wq descriptors. Called with copy_wq lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /* if no Ack received from firmware, then nothing to clean */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) if (!fnic->fw_ack_recd[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * Update desc_available count based on number of freed descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * Account for wraparound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) if (wq->to_clean_index <= fnic->fw_ack_index[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) wq->ring.desc_avail += (fnic->fw_ack_index[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) - wq->to_clean_index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) wq->ring.desc_avail += (wq->ring.desc_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) - wq->to_clean_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) + fnic->fw_ack_index[0] + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * just bump clean index to ack_index+1 accounting for wraparound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * this will essentially free up all descriptors between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * to_clean_index and fw_ack_index, both inclusive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) wq->to_clean_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /* we have processed the acks received so far */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) fnic->fw_ack_recd[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * __fnic_set_state_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * Sets/Clears bits in fnic's state_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) unsigned long clearbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) unsigned long host_lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) if (clearbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) fnic->state_flags &= ~st_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) fnic->state_flags |= st_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 *
 * Marks the fnic as being in firmware reset, drops all queued frames,
 * waits for in-flight I/Os to drain, then enqueues a reset descriptor
 * on copy WQ 0.  Returns 0 on success, -EAGAIN if no WQ descriptor is
 * available (the FWRESET flag is cleared again in that case).
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	/* drop any FC frames still queued for receive/transmit */
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	/*
	 * NOTE(review): schedule_timeout() is called without setting the
	 * task state first, so it returns immediately and this is
	 * effectively a yield loop — confirm this busy-wait is intended.
	 */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* reclaim fw-acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else {
		/* post the reset request and update the fw-request high-water mark */
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				atomic64_read(
				&fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		/* could not post the reset: undo the FWRESET indication */
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 *
 * Registers the assigned FC ID (and gateway MAC) with the firmware
 * after FLOGI completes.  Uses a FIP registration descriptor when the
 * adapter is FIP-capable and not in map-dest mode, otherwise a plain
 * FLOGI registration.  Returns 0 on success, -EAGAIN if no copy-WQ
 * descriptor is available.
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* reclaim fw-acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	/*
	 * map_dest: no gateway known, frames go to the FCoE broadcast
	 * address; otherwise use the discovered gateway MAC.
	 */
	if (fnic->ctlr.map_dest) {
		eth_broadcast_addr(gw_mac);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		/* FIP mode: register source MAC along with fc_id and timeouts */
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	/* track the high-water mark of outstanding firmware requests */
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 *
 * Builds the device-private SGL from the command's mapped scatterlist,
 * DMA-maps the SGL and the sense buffer, then posts an ICMND_16
 * descriptor on copy WQ 0.  Returns 0 on success or
 * SCSI_MLQUEUE_HOST_BUSY on DMA-mapping failure / WQ exhaustion.
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		/* hand the SGL itself to the device */
		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
				io_req->sgl_list,
				sizeof(io_req->sgl_list[0]) * sg_count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
			printk(KERN_ERR "DMA mapping failed\n");
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}

	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
		/*
		 * NOTE(review): this unmaps sgl_list_pa even when
		 * sg_count == 0, in which case it was never mapped here —
		 * confirm io_req->sgl_list_pa is zero-initialized upstream.
		 */
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				sizeof(io_req->sgl_list[0]) * sg_count,
				DMA_TO_DEVICE);
		printk(KERN_ERR "DMA mapping failed\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* convert the LUN into wire (REPORT LUNS) format for the descriptor */
	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	/* reclaim fw-acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* translate the data direction into FCPIO read/write flags */
	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	/* request sequence-level retry only if both adapter and rport support it */
	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
					 	/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	/* track the high-water mark of outstanding firmware requests */
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) * fnic_queuecommand
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * Routine to send a scsi cdb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * Called with host_lock held and interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct fc_lport *lp = shost_priv(sc->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) struct fnic_io_req *io_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) struct fnic *fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) struct vnic_wq_copy *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) u64 cmd_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) int sg_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) spinlock_t *io_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) int io_lock_acquired = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) struct fc_rport_libfc_priv *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) rport = starget_to_rport(scsi_target(sc->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (!rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) "returning DID_NO_CONNECT for IO as rport is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) sc->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) ret = fc_remote_port_chkready(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) "rport is not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) sc->result = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) rp = rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (!rp || rp->rp_state == RPORT_ST_DELETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) "rport 0x%x removed, returning DID_NO_CONNECT\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) sc->result = DID_NO_CONNECT<<16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (rp->rp_state != RPORT_ST_READY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) rport->port_id, rp->rp_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) sc->result = DID_IMM_RETRY << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) if (lp->state != LPORT_ST_READY || !(lp->link_up))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) atomic_inc(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) * Release host lock, use driver resource specific locks from here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * Don't re-enable interrupts in case they were disabled prior to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * caller disabling them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) spin_unlock(lp->host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) CMD_FLAGS(sc) = FNIC_NO_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /* Get a new io_req for this SCSI IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) atomic64_inc(&fnic_stats->io_stats.alloc_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ret = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) memset(io_req, 0, sizeof(*io_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) /* Map the data buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) sg_count = scsi_dma_map(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) if (sg_count < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) sc->request->tag, sc, 0, sc->cmnd[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) sg_count, CMD_STATE(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) /* Determine the type of scatter/gather list we need */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) io_req->sgl_cnt = sg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) if (sg_count > FNIC_DFLT_SG_DESC_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) io_req->sgl_type = FNIC_SGL_CACHE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) if (sg_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) io_req->sgl_list =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (!io_req->sgl_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) atomic64_inc(&fnic_stats->io_stats.alloc_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) ret = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) scsi_dma_unmap(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /* Cache sgl list allocated address before alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) io_req->sgl_list_alloc = io_req->sgl_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) ptr = (unsigned long) io_req->sgl_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (ptr % FNIC_SG_DESC_ALIGN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) io_req->sgl_list = (struct host_sg_desc *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) (((unsigned long) ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) + FNIC_SG_DESC_ALIGN - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) & ~(FNIC_SG_DESC_ALIGN - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) * Will acquire lock defore setting to IO initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) io_lock = fnic_io_lock_hash(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) /* initialize rest of io_req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) io_lock_acquired = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) io_req->port_id = rport->port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) io_req->start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) CMD_SP(sc) = (char *)io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) sc->scsi_done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /* create copy wq desc and enqueue it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) wq = &fnic->wq_copy[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * In case another thread cancelled the request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * refetch the pointer under the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) sc->request->tag, sc, 0, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) atomic_dec(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /* acquire host lock before returning to SCSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) spin_lock(lp->host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) atomic64_inc(&fnic_stats->io_stats.active_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) atomic64_inc(&fnic_stats->io_stats.num_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (atomic64_read(&fnic_stats->io_stats.active_ios) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) atomic64_read(&fnic_stats->io_stats.max_active_ios))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) atomic64_set(&fnic_stats->io_stats.max_active_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) atomic64_read(&fnic_stats->io_stats.active_ios));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /* REVISIT: Use per IO lock in the final code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) sc->cmnd[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) sc->request->tag, sc, io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) sg_count, cmd_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /* if only we issued IO, will we have the io lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (io_lock_acquired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) atomic_dec(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* acquire host lock before returning to SCSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) spin_lock(lp->host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) DEF_SCSI_QCMD(fnic_queuecommand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) * fnic_fcpio_fw_reset_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) * Routine to handle fw reset completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) struct fcpio_fw_req *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) u8 hdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct fcpio_tag tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) atomic64_inc(&reset_stats->fw_reset_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /* Clean up all outstanding io requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) fnic_cleanup_io(fnic, SCSI_NO_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) atomic64_set(&fnic->io_cmpl_skip, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) /* fnic should be in FC_TRANS_ETH_MODE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) /* Check status of reset completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (!hdr_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) "reset cmpl success\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /* Ready to send flogi out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) fnic->state = FNIC_IN_ETH_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) "fnic fw_reset : failed %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * Unable to change to eth mode, cannot send out flogi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * Change state to fc mode, so that subsequent Flogi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * requests from libFC will cause more attempts to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * reset the firmware. Free the cached flogi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) fnic->state = FNIC_IN_FC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) atomic64_inc(&reset_stats->fw_reset_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) "Unexpected state %s while processing"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) " reset cmpl\n", fnic_state_to_str(fnic->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) atomic64_inc(&reset_stats->fw_reset_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) /* Thread removing device blocks till firmware reset is complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (fnic->remove_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) complete(fnic->remove_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * If fnic is being removed, or fw reset failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * free the flogi frame. Else, send it out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (fnic->remove_wait || ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) skb_queue_purge(&fnic->tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) goto reset_cmpl_handler_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) fnic_flush_tx(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) reset_cmpl_handler_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * fnic_fcpio_flogi_reg_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) * Routine to handle flogi register completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct fcpio_fw_req *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) u8 hdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct fcpio_tag tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /* Update fnic state based on status of flogi reg completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /* Check flogi registration completion status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (!hdr_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) "flog reg succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) fnic->state = FNIC_IN_FC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) "fnic flogi reg :failed %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) fnic->state = FNIC_IN_ETH_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) "Unexpected fnic state %s while"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) " processing flogi reg completion\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) fnic_state_to_str(fnic->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (fnic->stop_rx_link_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) goto reg_cmpl_handler_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) fnic_flush_tx(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) queue_work(fnic_event_queue, &fnic->frame_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) reg_cmpl_handler_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) u16 request_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (wq->to_clean_index <= wq->to_use_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* out of range, stale request_out index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (request_out < wq->to_clean_index ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) request_out >= wq->to_use_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /* out of range, stale request_out index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (request_out < wq->to_clean_index &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) request_out >= wq->to_use_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /* request_out index is in range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * Mark that ack received and store the Ack index. If there are multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * acks received before Tx thread cleans it up, the latest value will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * used which is correct behavior. This state should be in the copy Wq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * instead of in the fnic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) unsigned int cq_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct fcpio_fw_req *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct vnic_wq_copy *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) u16 request_out = desc->u.ack.request_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) u64 *ox_id_tag = (u64 *)(void *)desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* mark the ack state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (is_ack_index_in_range(wq, request_out)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) fnic->fw_ack_index[0] = request_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) fnic->fw_ack_recd[0] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) atomic64_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) FNIC_TRACE(fnic_fcpio_ack_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ox_id_tag[4], ox_id_tag[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * fnic_fcpio_icmnd_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Routine to handle icmnd completions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct fcpio_fw_req *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u8 hdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct fcpio_tag tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u64 xfer_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct fcpio_icmnd_cmpl *icmnd_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u64 cmd_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) unsigned long start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) unsigned long io_duration_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* Decode the cmpl description to get the io_req id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) fcpio_tag_id_dec(&tag, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) icmnd_cmpl = &desc->u.icmnd_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (id >= fnic->fnic_max_tag_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) "Tag out of range tag %x hdr status = %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) id, fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) sc = scsi_host_find_tag(fnic->lport->host, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) WARN_ON_ONCE(!sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) atomic64_inc(&fnic_stats->io_stats.sc_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) "icmnd_cmpl sc is null - "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) "hdr status = %s tag = 0x%x desc = 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) fnic_fcpio_status_to_str(hdr_status), id, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) fnic->lport->host->host_no, id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) ((u64)icmnd_cmpl->_resvd0[1] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) (u64)icmnd_cmpl->_resvd0[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ((u64)hdr_status << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) (u64)icmnd_cmpl->scsi_status << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) (u64)icmnd_cmpl->flags), desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) (u64)icmnd_cmpl->residual, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) io_lock = fnic_io_lock_hash(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) WARN_ON_ONCE(!io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) atomic64_inc(&fnic_stats->io_stats.ioreq_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) "icmnd_cmpl io_req is null - "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) "hdr status = %s tag = 0x%x sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) fnic_fcpio_status_to_str(hdr_status), id, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) start_time = io_req->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* firmware completed the io */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) io_req->io_completed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * if SCSI-ML has already issued abort on this command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * set completion of the IO. The abts path will clean it up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * set the FNIC_IO_DONE so that this doesn't get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * flagged as 'out of order' if it was not aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) CMD_FLAGS(sc) |= FNIC_IO_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if(FCPIO_ABORTED == hdr_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) "icmnd_cmpl abts pending "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) "hdr status = %s tag = 0x%x sc = 0x%p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) "scsi_status = %x residual = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) fnic_fcpio_status_to_str(hdr_status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) id, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) icmnd_cmpl->scsi_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) icmnd_cmpl->residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* Mark the IO as complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) icmnd_cmpl = &desc->u.icmnd_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) switch (hdr_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) case FCPIO_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) xfer_len = scsi_bufflen(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) scsi_set_resid(sc, icmnd_cmpl->residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) xfer_len -= icmnd_cmpl->residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) atomic64_inc(&fnic_stats->misc_stats.check_condition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) case FCPIO_TIMEOUT: /* request was timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) case FCPIO_ABORTED: /* request was aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) scsi_set_resid(sc, icmnd_cmpl->residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) atomic64_inc(&fnic_stats->io_stats.io_not_found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) case FCPIO_FW_ERR: /* request was terminated due fw error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) case FCPIO_INVALID_HEADER: /* header contains invalid data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* Break link with the SCSI command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) CMD_FLAGS(sc) |= FNIC_IO_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (hdr_status != FCPIO_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) atomic64_inc(&fnic_stats->io_stats.io_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) cmd_trace = ((u64)hdr_status << 56) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) (u64)icmnd_cmpl->scsi_status << 48 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) sc->device->host->host_no, id, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ((u64)icmnd_cmpl->_resvd0[1] << 56 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) (u64)icmnd_cmpl->_resvd0[0] << 48 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) jiffies_to_msecs(jiffies - start_time)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) desc, cmd_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (sc->sc_data_direction == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) fnic->lport->host_stats.fcp_input_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) fnic->fcp_input_bytes += xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) fnic->lport->host_stats.fcp_output_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) fnic->fcp_output_bytes += xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) fnic->lport->host_stats.fcp_control_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) atomic64_dec(&fnic_stats->io_stats.active_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (atomic64_read(&fnic->io_cmpl_skip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) atomic64_dec(&fnic->io_cmpl_skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) atomic64_inc(&fnic_stats->io_stats.io_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) io_duration_time = jiffies_to_msecs(jiffies) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) jiffies_to_msecs(start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if(io_duration_time <= 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) else if(io_duration_time <= 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) else if(io_duration_time <= 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) else if(io_duration_time <= 5000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) else if(io_duration_time <= 10000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) else if(io_duration_time <= 30000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* Call SCSI completion function to complete the IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (sc->scsi_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /* fnic_fcpio_itmf_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * Routine to handle itmf completions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct fcpio_fw_req *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) u8 hdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct fcpio_tag tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) unsigned long start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) fcpio_tag_id_dec(&tag, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) "Tag out of range tag %x hdr status = %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) id, fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) WARN_ON_ONCE(!sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) atomic64_inc(&fnic_stats->io_stats.sc_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) fnic_fcpio_status_to_str(hdr_status), id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) io_lock = fnic_io_lock_hash(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) WARN_ON_ONCE(!io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) atomic64_inc(&fnic_stats->io_stats.ioreq_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) "itmf_cmpl io_req is null - "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) "hdr status = %s tag = 0x%x sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) fnic_fcpio_status_to_str(hdr_status), id, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) start_time = io_req->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /* Abort and terminate completion of device reset req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* REVISIT : Add asserts about various flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) "dev reset abts cmpl recd. id %x status %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) id, fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) CMD_ABTS_STATUS(sc) = hdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (io_req->abts_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) complete(io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) } else if (id & FNIC_TAG_ABORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* Completion of abort cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) switch (hdr_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) case FCPIO_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) case FCPIO_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) atomic64_inc(&abts_stats->abort_fw_timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) atomic64_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) &term_stats->terminate_fw_timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) case FCPIO_ITMF_REJECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) "abort reject recd. id %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) (int)(id & FNIC_TAG_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) case FCPIO_IO_NOT_FOUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) atomic64_inc(&abts_stats->abort_io_not_found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) atomic64_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) &term_stats->terminate_io_not_found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) atomic64_inc(&abts_stats->abort_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) atomic64_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) &term_stats->terminate_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* This is a late completion. Ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) CMD_ABTS_STATUS(sc) = hdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* If the status is IO not found consider it as success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (hdr_status == FCPIO_IO_NOT_FOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) "abts cmpl recd. id %d status %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) (int)(id & FNIC_TAG_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * If scsi_eh thread is blocked waiting for abts to complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * signal completion to it. IO will be cleaned in the thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * else clean it in this context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (io_req->abts_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) complete(io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) "abts cmpl, completing IO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) sc->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (sc->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) sc->device->host->host_no, id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) (((u64)hdr_status << 40) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) (u64)sc->cmnd[0] << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) (u64)sc->cmnd[2] << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) (u64)sc->cmnd[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) (((u64)CMD_FLAGS(sc) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) atomic64_dec(&fnic_stats->io_stats.active_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (atomic64_read(&fnic->io_cmpl_skip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) atomic64_dec(&fnic->io_cmpl_skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) atomic64_inc(&fnic_stats->io_stats.io_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) } else if (id & FNIC_TAG_DEV_RST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* Completion of device reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) CMD_LR_STATUS(sc) = hdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) sc->device->host->host_no, id, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) desc, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) "Terminate pending "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) "dev reset cmpl recd. id %d status %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) (int)(id & FNIC_TAG_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* Need to wait for terminate completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) sc->device->host->host_no, id, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) desc, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) "dev reset cmpl recd after time out. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) "id %d status %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) (int)(id & FNIC_TAG_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) "dev reset cmpl recd. id %d status %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) (int)(id & FNIC_TAG_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) fnic_fcpio_status_to_str(hdr_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (io_req->dr_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) complete(io_req->dr_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) "Unexpected itmf io state %s tag %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * fnic_fcpio_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * Routine to service the cq for wq_copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) unsigned int cq_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) struct fcpio_fw_req *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct fnic *fnic = vnic_dev_priv(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) switch (desc->hdr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) case FCPIO_ICMND_CMPL: /* fw completed a command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) case FCPIO_RESET_CMPL: /* fw completed reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) switch (desc->hdr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) case FCPIO_ACK: /* fw copied copy wq desc to its queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) fnic_fcpio_ack_handler(fnic, cq_index, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) case FCPIO_ICMND_CMPL: /* fw completed a command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) fnic_fcpio_itmf_cmpl_handler(fnic, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) case FCPIO_RESET_CMPL: /* fw completed reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) "firmware completion type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) desc->hdr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * fnic_wq_copy_cmpl_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * Routine to process wq copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) unsigned int wq_work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) unsigned int i, cq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) unsigned int cur_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) u64 start_jiffies = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) u64 end_jiffies = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) u64 delta_jiffies = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) u64 delta_ms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) for (i = 0; i < fnic->wq_copy_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) cq_index = i + fnic->raw_wq_count + fnic->rq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) start_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) fnic_fcpio_cmpl_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) copy_work_to_do);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) end_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) wq_work_done += cur_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) delta_jiffies = end_jiffies - start_jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (delta_jiffies >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) atomic64_set(&misc_stats->max_isr_jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) delta_jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) delta_ms = jiffies_to_msecs(delta_jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) atomic64_set(&misc_stats->corr_work_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) cur_work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return wq_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) unsigned long start_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) for (i = 0; i < fnic->fnic_max_tag_id; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (i == exclude_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) io_lock = fnic_io_lock_tag(fnic, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) sc = scsi_host_find_tag(fnic->lport->host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * We will be here only when FW completes reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * without sending completions for outstanding ios.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (io_req && io_req->dr_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) complete(io_req->dr_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) else if (io_req && io_req->abts_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) complete(io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * If there is a scsi_cmnd associated with this io_req, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * free the corresponding state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) start_time = io_req->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) sc->result = DID_TRANSPORT_DISRUPTED << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) __func__, sc->request->tag, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) (jiffies - start_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (atomic64_read(&fnic->io_cmpl_skip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) atomic64_dec(&fnic->io_cmpl_skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) atomic64_inc(&fnic_stats->io_stats.io_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /* Complete the command to SCSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (sc->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sc->request->tag, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) FNIC_TRACE(fnic_cleanup_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) sc->device->host->host_no, i, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 0, ((u64)sc->cmnd[0] << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) (u64)sc->cmnd[2] << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) (u64)sc->cmnd[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct fcpio_host_req *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct fnic *fnic = vnic_dev_priv(wq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) unsigned long start_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* get the tag reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) fcpio_tag_id_dec(&desc->hdr.tag, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) id &= FNIC_TAG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (id >= fnic->fnic_max_tag_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) sc = scsi_host_find_tag(fnic->lport->host, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (!sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) io_lock = fnic_io_lock_hash(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* Get the IO context which this desc refers to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /* fnic interrupts are turned off by now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) goto wq_copy_cleanup_scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) start_time = io_req->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) wq_copy_cleanup_scsi_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) sc->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) " DID_NO_CONNECT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (sc->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) FNIC_TRACE(fnic_wq_copy_cleanup_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) sc->device->host->host_no, id, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 0, ((u64)sc->cmnd[0] << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) u32 task_req, u8 *fc_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct fnic_io_req *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct vnic_wq_copy *wq = &fnic->wq_copy[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct Scsi_Host *host = fnic->lport->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) spin_lock_irqsave(host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (unlikely(fnic_chk_state_flags_locked(fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) FNIC_FLAGS_IO_BLOCKED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) spin_unlock_irqrestore(host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) atomic_inc(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) spin_unlock_irqrestore(host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) free_wq_copy_descs(fnic, wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (!vnic_wq_copy_desc_avail(wq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) atomic_dec(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) "fnic_queue_abort_io_req: failure: no descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 0, task_req, tag, fc_lun, io_req->port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) fnic->config.ra_tov, fnic->config.ed_tov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) atomic_dec(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) int abt_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) int term_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct scsi_lun fc_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) enum fnic_ioreq_state old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) "fnic_rport_exch_reset called portid 0x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (fnic->in_remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) abt_tag = tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) io_lock = fnic_io_lock_tag(fnic, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) sc = scsi_host_find_tag(fnic->lport->host, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (!io_req || io_req->port_id != port_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * Found IO that is still pending with firmware and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * belongs to rport that went away
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (io_req->abts_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) "fnic_rport_exch_reset: io_req->abts_done is set "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) "state is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) fnic_ioreq_state_to_str(CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) "rport_exch_reset "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) "IO not yet issued %p tag 0x%x flags "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) "%x state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) old_ioreq_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) atomic64_inc(&reset_stats->device_reset_terminates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) abt_tag = (tag | FNIC_TAG_DEV_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) "fnic_rport_exch_reset dev rst sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) BUG_ON(io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) "fnic_rport_reset_exch: Issuing abts\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) /* Now queue the abort command to firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) int_to_scsilun(sc->device->lun, &fc_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (fnic_queue_abort_io_req(fnic, abt_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) FCPIO_ITMF_ABT_TASK_TERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) fc_lun.scsi_lun, io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * Revert the cmd state back to old state, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * it hasn't changed in between. This cmd will get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * aborted later by scsi_eh, or cleaned up during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * lun reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) CMD_STATE(sc) = old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) atomic64_inc(&term_stats->terminates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) term_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (term_cnt > atomic64_read(&term_stats->max_terminates))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) atomic64_set(&term_stats->max_terminates, term_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) void fnic_terminate_rport_io(struct fc_rport *rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) int abt_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) int term_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct scsi_lun fc_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct fc_rport_libfc_priv *rdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct fc_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) struct fnic *fnic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) struct fc_rport *cmd_rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct reset_stats *reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct terminate_stats *term_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) enum fnic_ioreq_state old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (!rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) rdata = rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (!rdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) lport = rdata->local_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (!lport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) fnic = lport_priv(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) fnic->lport->host, "fnic_terminate_rport_io called"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) rport->port_name, rport->node_name, rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (fnic->in_remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) reset_stats = &fnic->fnic_stats.reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) term_stats = &fnic->fnic_stats.term_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) abt_tag = tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) io_lock = fnic_io_lock_tag(fnic, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) sc = scsi_host_find_tag(fnic->lport->host, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (!sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) cmd_rport = starget_to_rport(scsi_target(sc->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (rport != cmd_rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (!io_req || rport != cmd_rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * Found IO that is still pending with firmware and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * belongs to rport that went away
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (io_req->abts_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) "fnic_terminate_rport_io: io_req->abts_done is set "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) "state is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) fnic_ioreq_state_to_str(CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) "fnic_terminate_rport_io "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) "IO not yet issued %p tag 0x%x flags "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) "%x state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) old_ioreq_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) atomic64_inc(&reset_stats->device_reset_terminates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) abt_tag = (tag | FNIC_TAG_DEV_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) BUG_ON(io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) "fnic_terminate_rport_io: Issuing abts\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* Now queue the abort command to firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) int_to_scsilun(sc->device->lun, &fc_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (fnic_queue_abort_io_req(fnic, abt_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) FCPIO_ITMF_ABT_TASK_TERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) fc_lun.scsi_lun, io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * Revert the cmd state back to old state, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * it hasn't changed in between. This cmd will get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * aborted later by scsi_eh, or cleaned up during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * lun reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) CMD_STATE(sc) = old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) atomic64_inc(&term_stats->terminates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) term_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (term_cnt > atomic64_read(&term_stats->max_terminates))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) atomic64_set(&term_stats->max_terminates, term_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * This function is exported to SCSI for sending abort cmnds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * A SCSI IO is represented by a io_req in the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) int fnic_abort_cmd(struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct fc_lport *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct fnic *fnic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct fnic_io_req *io_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) unsigned long start_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) int ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) u32 task_req = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct scsi_lun fc_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) struct fnic_stats *fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct abort_stats *abts_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) struct terminate_stats *term_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) enum fnic_ioreq_state old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) unsigned long abt_issued_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) DECLARE_COMPLETION_ONSTACK(tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) /* Wait for rport to unblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) fc_block_scsi_eh(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /* Get local-port, check ready and link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) lp = shost_priv(sc->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) abts_stats = &fnic->fnic_stats.abts_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) term_stats = &fnic->fnic_stats.term_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) rport = starget_to_rport(scsi_target(sc->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) tag = sc->request->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) CMD_FLAGS(sc) = FNIC_NO_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) goto fnic_abort_cmd_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * Avoid a race between SCSI issuing the abort and the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * completing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * If the command is already completed by the fw cmpl code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * we just return SUCCESS from here. This means that the abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * succeeded. In the SCSI ML, since the timeout for command has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * happened, the completion wont actually complete the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * and it will be considered as an aborted command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * The CMD_SP will not be cleared except while holding io_req_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) io_lock = fnic_io_lock_hash(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) goto fnic_abort_cmd_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) io_req->abts_done = &tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) goto wait_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (abt_issued_time <= 6000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * Command is still pending, need to abort it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * If the firmware completes the command after this point,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * the completion wont be done till mid-layer, since abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * has already started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) old_ioreq_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * Check readiness of the remote port. If the path to remote
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * port is up, then send abts to the remote port to terminate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * the IO. Else, just locally terminate the IO in the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (fc_remote_port_chkready(rport) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) task_req = FCPIO_ITMF_ABT_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) task_req = FCPIO_ITMF_ABT_TASK_TERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /* Now queue the abort command to firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) int_to_scsilun(sc->device->lun, &fc_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) fc_lun.scsi_lun, io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) CMD_STATE(sc) = old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) io_req->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) goto fnic_abort_cmd_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (task_req == FCPIO_ITMF_ABT_TASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) atomic64_inc(&fnic_stats->abts_stats.aborts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) atomic64_inc(&fnic_stats->term_stats.terminates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * We queued an abort IO, wait for its completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * Once the firmware completes the abort command, it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) * wake up this thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) wait_pending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) wait_for_completion_timeout(&tm_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) msecs_to_jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) (2 * fnic->config.ra_tov +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) fnic->config.ed_tov));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /* Check the abort status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) atomic64_inc(&fnic_stats->io_stats.ioreq_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) goto fnic_abort_cmd_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) io_req->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /* fw did not complete abort, timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (task_req == FCPIO_ITMF_ABT_TASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) atomic64_inc(&abts_stats->abort_drv_timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) atomic64_inc(&term_stats->terminate_drv_timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) goto fnic_abort_cmd_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /* IO out of order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) "Issuing Host reset due to out of order IO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) goto fnic_abort_cmd_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) start_time = io_req->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) * firmware completed the abort, check the status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * free the io_req if successful. If abort fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * Device reset will clean the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) goto fnic_abort_cmd_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (sc->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) /* Call SCSI completion function to complete the IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) sc->result = (DID_ABORT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) atomic64_dec(&fnic_stats->io_stats.active_ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (atomic64_read(&fnic->io_cmpl_skip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) atomic64_dec(&fnic->io_cmpl_skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) atomic64_inc(&fnic_stats->io_stats.io_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) fnic_abort_cmd_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) sc->request->tag, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 0, ((u64)sc->cmnd[0] << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) "Returning from abort cmd type %x %s\n", task_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) (ret == SUCCESS) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) "SUCCESS" : "FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static inline int fnic_queue_dr_io_req(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) struct scsi_cmnd *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct fnic_io_req *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct vnic_wq_copy *wq = &fnic->wq_copy[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct Scsi_Host *host = fnic->lport->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct scsi_lun fc_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) unsigned long intr_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) spin_lock_irqsave(host->host_lock, intr_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (unlikely(fnic_chk_state_flags_locked(fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) FNIC_FLAGS_IO_BLOCKED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) spin_unlock_irqrestore(host->host_lock, intr_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) atomic_inc(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) spin_unlock_irqrestore(host->host_lock, intr_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) free_wq_copy_descs(fnic, wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (!vnic_wq_copy_desc_avail(wq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) "queue_dr_io_req failure - no descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) goto lr_io_req_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* fill in the lun info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) int_to_scsilun(sc->device->lun, &fc_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) fc_lun.scsi_lun, io_req->port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) fnic->config.ra_tov, fnic->config.ed_tov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) lr_io_req_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) atomic_dec(&fnic->in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * Clean up any pending aborts on the lun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * For each outstanding IO on this lun, whose abort is not completed by fw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * issue a local abort. Wait for abort to complete. Return 0 if all commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * successfully aborted, 1 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) static int fnic_clean_pending_aborts(struct fnic *fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) struct scsi_cmnd *lr_sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) bool new_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) int tag, abt_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct scsi_lun fc_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct scsi_device *lun_dev = lr_sc->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) DECLARE_COMPLETION_ONSTACK(tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) enum fnic_ioreq_state old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) io_lock = fnic_io_lock_tag(fnic, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) sc = scsi_host_find_tag(fnic->lport->host, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) * ignore this lun reset cmd if issued using new SC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) * or cmds that do not belong to this lun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (!io_req || sc->device != lun_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) * Found IO that is still pending with firmware and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) * belongs to the LUN that we are resetting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) "Found IO in %s on lun\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) fnic_ioreq_state_to_str(CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) "%s dev rst not pending sc 0x%p\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (io_req->abts_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) "%s: io_req->abts_done is set state is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) old_ioreq_state = CMD_STATE(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * Any pending IO issued prior to reset is expected to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * in abts pending state, if not we need to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * When IO is completed, the IO will be handed over and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) * handled in this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) BUG_ON(io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) abt_tag = tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) abt_tag |= FNIC_TAG_DEV_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) "%s: dev rst sc 0x%p\n", __func__, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) io_req->abts_done = &tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) /* Now queue the abort command to firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) int_to_scsilun(sc->device->lun, &fc_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (fnic_queue_abort_io_req(fnic, abt_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) FCPIO_ITMF_ABT_TASK_TERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) fc_lun.scsi_lun, io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) io_req->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) CMD_STATE(sc) = old_ioreq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) goto clean_pending_aborts_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) wait_for_completion_timeout(&tm_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) msecs_to_jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) (fnic->config.ed_tov));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) /* Recheck cmd state to check if it is now aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) io_req->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) /* if abort is still pending with fw, fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) goto clean_pending_aborts_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) /* original sc used for lr is handled by dev reset code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (sc != lr_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) /* original sc used for lr is handled by dev reset code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (sc != lr_sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * Any IO is returned during reset, it needs to call scsi_done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * to return the scsi_cmnd to upper layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (sc->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) /* Set result to let upper SCSI layer retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) sc->result = DID_RESET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) /* walk again to check, if IOs are still pending in fw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (fnic_is_abts_pending(fnic, lr_sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) clean_pending_aborts_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * fnic_scsi_host_start_tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * Allocates tagid from host's tag list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) struct request_queue *q = sc->request->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) struct request *dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (IS_ERR(dummy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return SCSI_NO_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) sc->tag = sc->request->tag = dummy->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) sc->host_scribble = (unsigned char *)dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) return dummy->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) * fnic_scsi_host_end_tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) * frees tag allocated by fnic_scsi_host_start_tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) struct request *dummy = (struct request *)sc->host_scribble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) blk_mq_free_request(dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) * on the LUN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) int fnic_device_reset(struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) struct fc_lport *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) struct fnic *fnic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) struct fnic_io_req *io_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) int ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) unsigned long start_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) struct scsi_lun fc_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) struct fnic_stats *fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct reset_stats *reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) int tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) DECLARE_COMPLETION_ONSTACK(tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) bool new_sc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) /* Wait for rport to unblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) fc_block_scsi_eh(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) /* Get local-port, check ready and link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) lp = shost_priv(sc->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) reset_stats = &fnic->fnic_stats.reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) atomic64_inc(&reset_stats->device_resets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) rport = starget_to_rport(scsi_target(sc->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) rport->port_id, sc->device->lun, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (lp->state != LPORT_ST_READY || !(lp->link_up))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) goto fnic_device_reset_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) /* Check if remote port up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (fc_remote_port_chkready(rport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) goto fnic_device_reset_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) /* Allocate tag if not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) tag = sc->request->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (unlikely(tag < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * Really should fix the midlayer to pass in a proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * request for ioctls...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) tag = fnic_scsi_host_start_tag(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (unlikely(tag == SCSI_NO_TAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) goto fnic_device_reset_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) tag_gen_flag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) new_sc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) io_lock = fnic_io_lock_hash(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * If there is a io_req attached to this command, then use it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) * else allocate a new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) goto fnic_device_reset_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) memset(io_req, 0, sizeof(*io_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) io_req->port_id = rport->port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) CMD_SP(sc) = (char *)io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) io_req->dr_done = &tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * issue the device reset, if enqueue failed, clean up the ioreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * and break assoc with scsi cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) io_req->dr_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) goto fnic_device_reset_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) * Wait on the local completion for LUN reset. The io_req may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) * freed while we wait since we hold no lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) wait_for_completion_timeout(&tm_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) goto fnic_device_reset_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) io_req->dr_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) status = CMD_LR_STATUS(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * If lun reset not completed, bail out with failed. io_req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * gets cleaned up during higher levels of EH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) if (status == FCPIO_INVALID_CODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) atomic64_inc(&reset_stats->device_reset_timeouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) "Device reset timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) int_to_scsilun(sc->device->lun, &fc_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * Issue abort and terminate on device reset request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * If q'ing of terminate fails, retry it after a delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (fnic_queue_abort_io_req(fnic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) tag | FNIC_TAG_DEV_RST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) FCPIO_ITMF_ABT_TASK_TERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) fc_lun.scsi_lun, io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) wait_for_completion_timeout(&tm_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) io_req->abts_done = &tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) "Abort and terminate issued on Device reset "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) "tag 0x%x sc 0x%p\n", tag, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) wait_for_completion_timeout(&tm_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) io_req->abts_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) goto fnic_device_reset_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) /* Completed, but not successful, clean up the io_req, return fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (status != FCPIO_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) FNIC_SCSI_DBG(KERN_DEBUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) "Device reset completed - failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) goto fnic_device_reset_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) * Clean up any aborts on this lun that have still not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * completed. If any of these fail, then LUN reset fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * clean_pending_aborts cleans all cmds on this lun except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) * the lun reset cmd. If all cmds get cleaned, the lun reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * succeeds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) "Device reset failed"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) " since could not abort all IOs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) goto fnic_device_reset_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) /* Clean lun reset command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) if (io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) /* Completed, and successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) fnic_device_reset_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) CMD_SP(sc) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) start_time = io_req->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) fnic_release_ioreq_buf(fnic, io_req, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) mempool_free(io_req, fnic->io_req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) fnic_device_reset_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) sc->request->tag, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) jiffies_to_msecs(jiffies - start_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 0, ((u64)sc->cmnd[0] << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) /* free tag if it is allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (unlikely(tag_gen_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) fnic_scsi_host_end_tag(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) "Returning from device reset %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) (ret == SUCCESS) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) "SUCCESS" : "FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (ret == FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) atomic64_inc(&reset_stats->device_reset_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) /* Clean up all IOs, clean up libFC local port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) int fnic_reset(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) struct fc_lport *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) struct fnic *fnic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) struct reset_stats *reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) lp = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) reset_stats = &fnic->fnic_stats.reset_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) "fnic_reset called\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) atomic64_inc(&reset_stats->fnic_resets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) * Reset local port, this will clean up libFC exchanges,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * reset remote port sessions, and if link is up, begin flogi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) ret = fc_lport_reset(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) "Returning from fnic reset %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) (ret == 0) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) "SUCCESS" : "FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) atomic64_inc(&reset_stats->fnic_reset_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) atomic64_inc(&reset_stats->fnic_reset_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) * SCSI Error handling calls driver's eh_host_reset if all prior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) * error handling levels return FAILED. If host reset completes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * successfully, and if link is up, then Fabric login begins.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) * Host Reset is the highest level of error recovery. If this fails, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) * host is offlined by SCSI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) int fnic_host_reset(struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) unsigned long wait_host_tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) struct Scsi_Host *shost = sc->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) struct fc_lport *lp = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) struct fnic *fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) if (!fnic->internal_reset_inprogress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) fnic->internal_reset_inprogress = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) "host reset in progress skipping another host reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * If fnic_reset is successful, wait for fabric login to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) * scsi-ml tries to send a TUR to every device if host reset is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) * successful, so before returning to scsi, fabric should be up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) if (ret == SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) ret = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) while (time_before(jiffies, wait_host_tmo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) if ((lp->state == LPORT_ST_READY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) (lp->link_up)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) ret = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) fnic->internal_reset_inprogress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) * This fxn is called from libFC when host is removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) void fnic_scsi_abort_io(struct fc_lport *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) enum fnic_state old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) struct fnic *fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) DECLARE_COMPLETION_ONSTACK(remove_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) /* Issue firmware reset for fnic, wait for reset to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) retry_fw_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) /* fw reset is in progress, poll for its completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) schedule_timeout(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) goto retry_fw_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) fnic->remove_wait = &remove_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) old_state = fnic->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) err = fnic_fw_reset_handler(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) fnic->state = old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) fnic->remove_wait = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* Wait for firmware reset to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) wait_for_completion_timeout(&remove_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) fnic->remove_wait = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) "fnic_scsi_abort_io %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) (fnic->state == FNIC_IN_ETH_MODE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) "SUCCESS" : "FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) * This fxn called from libFC to clean up driver IO state on link down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) void fnic_scsi_cleanup(struct fc_lport *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) enum fnic_state old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) struct fnic *fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) /* issue fw reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) retry_fw_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /* fw reset is in progress, poll for its completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) schedule_timeout(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) goto retry_fw_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) old_state = fnic->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (fnic_fw_reset_handler(fnic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) fnic->state = old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
/*
 * Intentionally empty: appears to be installed as a libFC scsi-cleanup
 * callback where fnic has no per-IO state to tear down.
 * NOTE(review): can't confirm the registration site from this chunk —
 * verify against the libfc_function_template setup in fnic_main.c.
 */
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) struct fnic *fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) /* Non-zero sid, nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) goto call_fc_exch_mgr_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) if (did) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) fnic_rport_exch_reset(fnic, did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) goto call_fc_exch_mgr_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) * sid = 0, did = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) * link down or device being removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) if (!fnic->in_remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) fnic_scsi_cleanup(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) fnic_scsi_abort_io(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) /* call libFC exch mgr reset to reset its exchanges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) call_fc_exch_mgr_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) fc_exch_mgr_reset(lp, sid, did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) * fnic_is_abts_pending() is a helper function that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) * walks through tag map to check if there is any IOs pending,if there is one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) * then it returns 1 (true), otherwise 0 (false)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) * otherwise, it checks for all IOs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) struct fnic_io_req *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) spinlock_t *io_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) struct scsi_device *lun_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (lr_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) lun_dev = lr_sc->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) /* walk again to check, if IOs are still pending in fw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) sc = scsi_host_find_tag(fnic->lport->host, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * ignore this lun reset cmd or cmds that do not belong to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) * this lun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) io_lock = fnic_io_lock_hash(fnic, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) spin_lock_irqsave(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) io_req = (struct fnic_io_req *)CMD_SP(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) if (!io_req || sc->device != lun_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) * Found IO that is still pending with firmware and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) * belongs to the LUN that we are resetting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) "Found IO in %s on lun\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) fnic_ioreq_state_to_str(CMD_STATE(sc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) spin_unlock_irqrestore(io_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) }