// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}

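/*
 * Delayed-work handler armed via qedf_cmd_timer_set(). It runs in
 * workqueue context when an outstanding ABTS, ELS or sequence-cleanup
 * request fails to complete within its timeout.
 */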
static void qedf_cmd_timeout(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;

	if (io_req == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
		return;
	}

	fcport = io_req->fcport;
	if (io_req->fcport == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
		return;
	}

	qedf = fcport->qedf;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		if (qedf == NULL) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ABTS xid=0x%x.\n",
				  io_req->xid);
			return;
		}

		QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/* Clear in-abort bit now that we're done with the command */
		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		if (!qedf) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ELS xid=0x%x.\n",
				  io_req->xid);
			return;
		}
		/* ELS request no longer outstanding since it timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang. For now just free
		 * the resources of the original I/O and the RRQ.
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
		    io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Hit default case, xid=0x%x.\n", io_req->xid);
		break;
	}
}

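/*
 * Tear down everything qedf_cmd_mgr_alloc() set up: the per-command BD
 * tables, the io_bdt pool, the per-command sense buffers and task/SGL
 * parameter structures (cancelling any pending RRQ work), and finally
 * the command manager itself.
 */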
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool) {
		QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
		goto free_cmd_pool;
	}

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}

static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
	qedf_send_rrq(io_req);
}

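/*
 * Allocate the global command manager: one qedf_ioreq per task id (xid)
 * in [0, FCOE_PARAMS_NUM_TASKS - 1], each with a DMA sense buffer,
 * firmware task/SGL parameter structures and an io_bdt BD table.
 * Commands are later handed out from this pool by qedf_alloc_cmd().
 */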
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 xid;
	int i;
	int num_ios;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
			  "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
		  "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize I/O request fields.
	 */
	xid = 0;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer) {
			QEDF_ERR(&qedf->dbg_ctx,
				 "Failed to alloc sense buffer.\n");
			goto mem_err;
		}

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
					  GFP_KERNEL);

	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
					       GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		  "cmgr->free_list_cnt=%d.\n",
		  atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}

struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Returning NULL, free_sqes=%d.\n",
			  free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if (atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Returning NULL, num_active_ios=%d.\n",
			  atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit global TIDs for certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Returning NULL, free_list_cnt=%d.\n",
			  atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

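	/*
	 * Round-robin search, under cmd_mgr->lock, for a command that is
	 * not currently allocated. cmd_mgr->idx remembers where the last
	 * search stopped so allocations cycle through the whole xid space.
	 */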
	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!io_req->alloc)
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req found to be dirty ox_id = 0x%x.\n",
			 io_req->xid);

	/* Clear any flags now that we've reallocated the xid */
	io_req->flags = 0;
	io_req->alloc = 1;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Clear any stale sc_cmd back pointer */
	io_req->sc_cmd = NULL;
	io_req->lun = -1;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);	/* ID: 001 */
	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No RX_ID assigned yet */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;
	return NULL;
}

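/*
 * Free the DMA-coherent middle-path buffers (request/response BDs and
 * data buffers) attached to an ELS or task-management request.
 */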
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

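/*
 * kref release callback for a qedf_ioreq: returns the command to the
 * manager's pool, drops the per-port active I/O count and frees any
 * middle-path resources still attached to the request.
 */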
void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;
	unsigned long flags;

	if (io_req->cmd_type == QEDF_SCSI_CMD) {
		QEDF_WARN(&fcport->qedf->dbg_ctx,
			  "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
			  io_req, io_req->xid);
		WARN_ON(io_req->sc_cmd);
	}

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
	if (atomic_read(&fcport->num_active_ios) < 0) {
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
		WARN_ON(1);
	}

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;
	io_req->fcport = NULL;

	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
	io_req->cpu = 0;
	spin_lock_irqsave(&cmd_mgr->lock, flags);
	io_req->fcport = NULL;
	io_req->alloc = 0;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
}

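/*
 * Map the command's scatterlist for DMA and translate it into the
 * firmware SGE table in io_req->bd_tbl, classifying the request as a
 * fast or slow SGE I/O along the way. Returns the number of BDs built.
 */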
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	u32 sg_len;
	u64 addr;
	int i = 0;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);
	sg = scsi_sglist(sc);

	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;

	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = (u32)sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		/*
		 * An intermediate (neither first nor last) s/g element that
		 * is shorter than a page forces the slow SGE path. This only
		 * applies when the fast path was not already chosen above,
		 * i.e. for writes with more than 8 scatter/gather elements.
		 */
		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;

		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
		bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
		bd[bd_count].sge_len = cpu_to_le32(sg_len);

		bd_count++;
		byte_count += sg_len;
	}

	/* If neither FAST nor SLOW was set above, default to FAST */
	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
			 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
			 scsi_bufflen(sc), io_req->xid);

	return bd_count;
}

static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

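/*
 * Build the 32-byte FCP_CMND IU that is handed to the firmware as part
 * of the task context: LUN, task attributes/TM flags, data direction,
 * the SCSI CDB and the FCP data length.
 */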
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
		       (struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

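/*
 * Initialize the firmware task context and SQE for a regular SCSI
 * read/write (or task-management) request. Completions are steered to
 * a CQ selected from the submitting CPU, and the prepared FCP_CMND is
 * byte-swapped to big endian before being handed to the firmware.
 */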
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of the command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
		    U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
		    U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
			io_req->sgl_task_params->small_mid_sge = 1;
		else
			io_req->sgl_task_params->small_mid_sge = 0;
	}

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
		qedf->slow_sge_ios++;
	else
		qedf->fast_sge_ios++;
}

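/*
 * Initialize the firmware task context and SQE for a middle-path (ELS or
 * task-management) request, whose payload lives in the separately
 * allocated mp_req request/response buffers rather than a SCSI
 * scatterlist.
 */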
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Initializing MP task for cmd_type=%d\n",
		  io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	/* Single SGE covering the whole request payload */
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Response buffer is a single page-sized SGE */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;

	/*
	 * The last argument is 0 because the earlier code never requested
	 * the FC header information.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);
}

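/*
 * Reserve the next send-queue entry for this connection and return its
 * index. sq_prod_idx wraps at the ring size, while fw_sq_prod_idx keeps
 * incrementing and is the value later written to the doorbell.
 */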
/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}

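/*
 * Publish the new SQ producer index to the firmware by writing the XCM
 * doorbell data for this connection to its mapped doorbell address.
 */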
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) void qedf_ring_doorbell(struct qedf_rport *fcport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct fcoe_db_data dbell = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) dbell.agg_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dbell.sq_prod = fcport->fw_sq_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /* wmb makes sure that the BDs data is updated before updating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * producer, otherwise FW may read old data from the BDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) writel(*(u32 *)&dbell, fcport->p_doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * Fence required to flush the write combined buffer, since another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * CPU may write to the same doorbell address and data may be lost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * due to relaxed order nature of write combined bar.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
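/*
 * The doorbell above is issued as a single 32-bit store: struct fcoe_db_data
 * appears to pack params, agg_flags and sq_prod into one dword, which is what
 * makes the *(u32 *)&dbell cast in writel() safe.  The first wmb() orders the
 * WQE/BD writes before the producer update and the second flushes the
 * write-combined doorbell BAR, as the in-line comments note.
 */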
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int8_t direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct qedf_ctx *qedf = fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct qedf_io_log *io_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) uint8_t op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) spin_lock_irqsave(&qedf->io_trace_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) io_log->direction = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) io_log->task_id = io_req->xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) io_log->port_id = fcport->rdata->ids.port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) io_log->lun = sc_cmd->device->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) io_log->op = op = sc_cmd->cmnd[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) io_log->lba[0] = sc_cmd->cmnd[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) io_log->lba[1] = sc_cmd->cmnd[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) io_log->lba[2] = sc_cmd->cmnd[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) io_log->lba[3] = sc_cmd->cmnd[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) io_log->bufflen = scsi_bufflen(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) io_log->sg_count = scsi_sg_count(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) io_log->result = sc_cmd->result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) io_log->jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) io_log->refcount = kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (direction == QEDF_IO_TRACE_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* For requests we only care about the submission CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) io_log->req_cpu = io_req->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) io_log->int_cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) io_log->rsp_cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) } else if (direction == QEDF_IO_TRACE_RSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) io_log->req_cpu = io_req->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) io_log->int_cpu = io_req->int_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) io_log->rsp_cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) io_log->sge_type = io_req->sge_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) qedf->io_trace_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) qedf->io_trace_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
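/*
 * io_trace_buf is a fixed-size circular log: io_trace_idx advances on every
 * traced request/response and wraps at QEDF_IO_TRACE_SIZE, so the oldest
 * entries are silently overwritten.  The call sites gate tracing on the
 * qedf_io_tracing knob.
 */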
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct Scsi_Host *host = sc_cmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct fc_lport *lport = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct qedf_ctx *qedf = lport_priv(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct e4_fcoe_task_context *task_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) u16 xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct fcoe_wqe *sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) u16 sqe_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Initialize rest of io_req fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) io_req->data_xfer_len = scsi_bufflen(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) sc_cmd->SCp.ptr = (char *)io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /* Record which cpu this request is associated with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) io_req->cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) io_req->io_req_flags = QEDF_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) qedf->input_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) io_req->io_req_flags = QEDF_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) qedf->output_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) io_req->io_req_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) qedf->control_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) xid = io_req->xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* Build buffer descriptor list for firmware from sg list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (qedf_build_bd_list_from_sg(io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* Release cmd will release io_req, but sc_cmd is assigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Release cmd will release io_req, but sc_cmd is assigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* Record the LUN number for later use if we need it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) io_req->lun = (int)sc_cmd->device->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* Obtain free SQE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) sqe_idx = qedf_get_sqe_idx(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) sqe = &fcport->sq[sqe_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) memset(sqe, 0, sizeof(struct fcoe_wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /* Get the task context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!task_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* Release cmd will release io_req, but sc_cmd is assigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* Ring doorbell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) qedf_ring_doorbell(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* Mark that the command is with the firmware now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (qedf_io_tracing && io_req->sc_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
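/*
 * Submission summary for qedf_post_io_req(): the caller (qedf_queuecommand()
 * below) holds fcport->rport_lock across this call, which is what makes the
 * unlocked qedf_get_sqe_idx()/doorbell sequence above safe.  A return of 0
 * means the WQE was posted and QEDF_CMD_OUTSTANDING is set; a non-zero return
 * means the command was already torn down here (sc_cmd detached and the
 * reference dropped) and the caller only needs to flow-control the midlayer.
 */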
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct fc_lport *lport = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct qedf_ctx *qedf = lport_priv(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct fc_rport_libfc_priv *rp = rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct qedf_rport *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct qedf_ioreq *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int num_sgs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) num_sgs = scsi_sg_count(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (num_sgs > QEDF_MAX_BDS_PER_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) "Number of SG elements %d exceeds the hardware limit of %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) num_sgs, QEDF_MAX_BDS_PER_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) sc_cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) sc_cmd->scsi_done(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) "Returning DNC as unloading or stop io, flags 0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) qedf->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) sc_cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) sc_cmd->scsi_done(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (!qedf->pdev->msix_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) sc_cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) sc_cmd->scsi_done(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) rval = fc_remote_port_chkready(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rval, rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) sc_cmd->result = rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) sc_cmd->scsi_done(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* Retry command if we are doing a qed drain operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) rc = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) goto exit_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (lport->state != LPORT_ST_READY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) rc = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) goto exit_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /* rport and fcport are allocated together, so fcport should be non-NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) fcport = (struct qedf_rport *)&rp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Session is not offloaded yet. Let SCSI-ml retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) rc = SCSI_MLQUEUE_TARGET_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto exit_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) atomic_inc(&fcport->ios_to_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (fcport->retry_delay_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* Take fcport->rport_lock for resetting the delay_timestamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) spin_lock_irqsave(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (time_after(jiffies, fcport->retry_delay_timestamp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) fcport->retry_delay_timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) spin_unlock_irqrestore(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* If retry_delay timer is active, flow off the ML */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) rc = SCSI_MLQUEUE_TARGET_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) atomic_dec(&fcport->ios_to_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto exit_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_unlock_irqrestore(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) rc = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) atomic_dec(&fcport->ios_to_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) goto exit_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) io_req->sc_cmd = sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* Take fcport->rport_lock for posting to fcport send queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) spin_lock_irqsave(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (qedf_post_io_req(fcport, io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /* Return SQE to pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) atomic_inc(&fcport->free_sqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) rc = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) spin_unlock_irqrestore(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) atomic_dec(&fcport->ios_to_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) exit_qcmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
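/*
 * Return-value convention for qedf_queuecommand(): 0 tells the SCSI midlayer
 * the command was either handed to the firmware or already completed via
 * scsi_done() (the early DID_NO_CONNECT/DID_ERROR paths above), while
 * SCSI_MLQUEUE_HOST_BUSY / SCSI_MLQUEUE_TARGET_BUSY ask the midlayer to hold
 * the command and retry it later without treating it as an error.
 */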
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct fcoe_cqe_rsp_info *fcp_rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct qedf_ctx *qedf = io_req->fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) u8 rsp_flags = fcp_rsp->rsp_flags.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int fcp_sns_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) int fcp_rsp_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) uint8_t *rsp_info, *sense_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) io_req->fcp_status = FC_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) io_req->fcp_resid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) io_req->fcp_resid = fcp_rsp->fcp_resid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) io_req->scsi_comp_flags = rsp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) fcp_rsp->scsi_status_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (rsp_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) fcp_rsp_len = fcp_rsp->fcp_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (rsp_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) fcp_sns_len = fcp_rsp->fcp_sns_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) io_req->fcp_rsp_len = fcp_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) io_req->fcp_sns_len = fcp_sns_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rsp_info = sense_data = io_req->sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* fetch fcp_rsp_code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /* Only for task management function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) io_req->fcp_rsp_code = rsp_info[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /* Adjust sense-data location. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) sense_data += fcp_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) "Truncating sense buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /* The sense buffer can be NULL for TMF commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (sc_cmd->sense_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (fcp_sns_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) memcpy(sc_cmd->sense_buffer, sense_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) fcp_sns_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
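/*
 * FCP_RSP layout note for qedf_parse_fcp_rsp(): when FCP_RSP_LEN_VALID is set
 * the response-info bytes (including the rsp_code at offset 3) come first in
 * the buffer and any sense data follows them, which is why sense_data is
 * advanced by fcp_rsp_len before the memcpy() above.  Only 4- and 8-byte
 * response-info lengths are accepted, consistent with what FCP allows for
 * task management responses.
 */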
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct scsi_cmnd *sc = io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) scsi_sg_count(sc), sc->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) io_req->bd_tbl->bd_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct scsi_cmnd *sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct fcoe_cqe_rsp_info *fcp_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct qedf_rport *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) u16 scope, qualifier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) u8 fw_residual_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) u16 chk_scope = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) sc_cmd = io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) fcp_rsp = &cqe->cqe_info.rsp_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (!sc_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (!sc_cmd->SCp.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) "another context.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (!sc_cmd->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) "Device for sc_cmd %p is NULL.\n", sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!sc_cmd->request) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) "sc_cmd=%p.\n", sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (!sc_cmd->request->q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) "is not valid, sc_cmd=%p.\n", sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) fcport = io_req->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * When flush is active, let the cmds be completed from the cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) "Dropping good completion xid=0x%x as fcport is flushing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) qedf_parse_fcp_rsp(io_req, fcp_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) qedf_unmap_sg_list(qedf, io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Check for FCP transport error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) QEDF_ERR(&(qedf->dbg_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) io_req->fcp_rsp_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) sc_cmd->result = DID_BUS_BUSY << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) FCOE_CQE_RSP_INFO_FW_UNDERRUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (fw_residual_flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) io_req->xid, fcp_rsp->rsp_flags.flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) io_req->fcp_resid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (io_req->cdb_status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Set resid to the whole buffer length so we won't try to reuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * any previously transferred data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) switch (io_req->fcp_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) case FC_GOOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (io_req->cdb_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* Good I/O completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) sc_cmd->result = DID_OK << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) refcount = kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) "%d:0:%d:%lld xid=0x%0x op=0x%02x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) "lba=%02x%02x%02x%02x cdb_status=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) "fcp_resid=0x%x refcount=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) qedf->lport->host->host_no, sc_cmd->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) sc_cmd->device->lun, io_req->xid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) sc_cmd->cmnd[4], sc_cmd->cmnd[5],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) io_req->cdb_status, io_req->fcp_resid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) io_req->cdb_status == SAM_STAT_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * Check whether we need to set retry_delay at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * all based on retry_delay module parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * and the status qualifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* Upper 2 bits, shifted down so scope compares as 1 or 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) scope = (fcp_rsp->retry_delay_timer & 0xC000) >> 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Lower 14 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
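/*
 * The qualifier extracted above is treated as a count of 100 ms units: the
 * conversion further down (qualifier * HZ / 10) turns it into a jiffies
 * offset that is added to the current time.
 */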
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (qedf_retry_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) chk_scope = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* Record stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (io_req->cdb_status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) SAM_STAT_TASK_SET_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) qedf->task_set_fulls++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) qedf->busy++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (io_req->fcp_resid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) scsi_set_resid(sc_cmd, io_req->fcp_resid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (chk_scope == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if ((scope == 1 || scope == 2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) (qualifier > 0 && qualifier <= 0x3FEF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* Check we don't go over the max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (qualifier > QEDF_RETRY_DELAY_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) qualifier = QEDF_RETRY_DELAY_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) "qualifier = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) (fcp_rsp->retry_delay_timer &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 0x3FFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) "Scope = %d and qualifier = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) scope, qualifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* Take fcport->rport_lock to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * update the retry_delay_timestamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) spin_lock_irqsave(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) fcport->retry_delay_timestamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) jiffies + (qualifier * HZ / 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) spin_unlock_irqrestore(&fcport->rport_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) scope, qualifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) io_req->fcp_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (qedf_io_tracing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * We wait till the end of the function to clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * outstanding bit in case we need to send an abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) sc_cmd->SCp.ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) sc_cmd->scsi_done(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /* Return a SCSI command in some other context besides a normal completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) int result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct scsi_cmnd *sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) int refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) "io_req:%p scsi_done handling already done\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * We will be done with this command after this call so clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * outstanding bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) sc_cmd = io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (!sc_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (!virt_addr_valid(sc_cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) goto bad_scsi_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (!sc_cmd->SCp.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) "another context.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (!sc_cmd->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) goto bad_scsi_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!virt_addr_valid(sc_cmd->device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) goto bad_scsi_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (!sc_cmd->sense_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) goto bad_scsi_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (!virt_addr_valid(sc_cmd->sense_buffer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) goto bad_scsi_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!sc_cmd->scsi_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) goto bad_scsi_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) qedf_unmap_sg_list(qedf, io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) sc_cmd->result = result << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) refcount = kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) "allowed=%d retries=%d refcount=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) qedf->lport->host->host_no, sc_cmd->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * Set resid to the whole buffer length so we won't try to reuse any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * previously read data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (qedf_io_tracing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) sc_cmd->SCp.ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) sc_cmd->scsi_done(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) bad_scsi_ptr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * Clear the io_req->sc_cmd backpointer so we don't try to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * this again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
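/*
 * Note on the bad_scsi_ptr path above: when any of the sc_cmd back-pointers
 * look corrupt we deliberately do not call scsi_done() on the command; the
 * io_req is detached and only the driver's reference (ID: 001) is dropped,
 * presumably leaving the midlayer to recover the command through its own
 * timeout/error handling.
 */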
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * Handle warning type CQE completions. This is mainly used for REC timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * popping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) int rval, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct qedf_rport *fcport = io_req->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) u64 err_warn_bit_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) u8 err_warn = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (!cqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) "cqe is NULL for io_req %p xid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) io_req, io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) "xid=0x%x\n", io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) "err_warn_bitmap=%08x:%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) "rx_buff_off=%08x, rx_id=%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) le32_to_cpu(cqe->cqe_info.err_info.rx_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /* Combine the two 32-bit halves into a single 64-bit bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) err_warn_bit_map = (u64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) for (i = 0; i < 64; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (err_warn_bit_map & (u64)((u64)1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) err_warn = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
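/*
 * The loop above simply finds the lowest set bit in the 64-bit warning
 * bitmap (functionally the same as __ffs64() on a non-zero map); err_warn
 * stays at 0xff when the firmware reported no specific warning code.
 */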
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* Check if REC TOV expired if this is a tape device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (err_warn ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) io_req->rx_buf_off =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) cqe->cqe_info.err_info.rx_buf_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) io_req->tx_buf_off =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) cqe->cqe_info.err_info.tx_buf_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) io_req->rx_id = cqe->cqe_info.err_info.rx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) rval = qedf_send_rec(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * We only want to abort the io_req if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * can't queue the REC command as we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * keep the exchange open for recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) goto send_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) send_abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) init_completion(&io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) rval = qedf_initiate_abts(io_req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
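/*
 * Recovery strategy above, as the code reads: for tape-class rports a REC
 * timer expiration first tries a REC (Read Exchange Concise) ELS via
 * qedf_send_rec() so the exchange stays open, and only falls back to ABTS if
 * the REC could not even be queued.  Every other case (non-tape rports or any
 * other warning code) goes straight to the ABTS at send_abort.
 */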
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /* Cleanup a command when we receive an error detection completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (io_req == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (io_req->fcport == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!cqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) "cqe is NULL for io_req %p\n", io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) "xid=0x%x\n", io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) "err_warn_bitmap=%08x:%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) "rx_buff_off=%08x, rx_id=%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) le32_to_cpu(cqe->cqe_info.err_info.rx_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /* When flush is active, let the cmds be flushed out from the cleanup context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) "Dropping EQE for xid=0x%x as fcport is flushing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (qedf->stop_io_on_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) qedf_stop_all_io(qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
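	/* Abort only the errored exchange; abts_done is completed from the ABTS path. */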
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) init_completion(&io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) rval = qedf_initiate_abts(io_req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (rval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) static void qedf_flush_els_req(struct qedf_ctx *qedf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct qedf_ioreq *els_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) kref_read(&els_req->refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * Need to distinguish this from a timeout when calling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * els_req->cb_func.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /* Cancel the timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) cancel_delayed_work_sync(&els_req->timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /* Call callback function to complete command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (els_req->cb_func && els_req->cb_arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) els_req->cb_func(els_req->cb_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) els_req->cb_arg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /* Release kref for original initiate_els */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) kref_put(&els_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /* A value of -1 for lun is a wild card that means flush all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * active SCSI I/Os for the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct qedf_ioreq *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct qedf_cmd_mgr *cmd_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int flush_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int wait_cnt = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) int refcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) QEDF_ERR(NULL, "fcport is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* Check that fcport is still offloaded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) qedf = fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (!qedf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) QEDF_ERR(NULL, "qedf is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /* Only wait for all commands to be queued in the Upload context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) (lun == -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) while (atomic_read(&fcport->ios_to_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) "Waiting for %d I/Os to be queued\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) atomic_read(&fcport->ios_to_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (wait_cnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) QEDF_ERR(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) "%d IOs request could not be queued\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) atomic_read(&fcport->ios_to_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) wait_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) cmd_mgr = qedf->cmd_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) atomic_read(&fcport->num_active_ios), fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) mutex_lock(&qedf->flush_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (lun == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) fcport->lun_reset_lun = lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
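	/*
	 * Walk the entire command pool and flush any active command that
	 * belongs to this fcport (and, for a LUN reset, to the given LUN).
	 */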
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) io_req = &cmd_mgr->cmds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (!io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (!io_req->fcport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) spin_lock_irqsave(&cmd_mgr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (io_req->alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (io_req->cmd_type == QEDF_SCSI_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) "Allocated but not queued, xid=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) spin_unlock_irqrestore(&cmd_mgr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) spin_unlock_irqrestore(&cmd_mgr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (io_req->fcport != fcport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * but RRQ is still pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * Workaround: Within qedf_send_rrq, we check if the fcport is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * NULL, and we drop the ref on the io_req to clean it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) refcount = kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) io_req->xid, io_req->cmd_type, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* If RRQ work has been queue, try to cancel it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * free the io_req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (atomic_read(&io_req->state) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) QEDFC_CMD_ST_RRQ_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (cancel_delayed_work_sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) (&io_req->rrq_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) "Putting reference for pending RRQ work xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* ID: 003 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) kref_put(&io_req->refcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) /* Only consider flushing ELS during target reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (io_req->cmd_type == QEDF_ELS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) lun == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) rc = kref_get_unless_zero(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) QEDF_ERR(&(qedf->dbg_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) io_req, io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) qedf_initiate_cleanup(io_req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) flush_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) qedf_flush_els_req(qedf, io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * Release the kref and go back to the top of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) goto free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (io_req->cmd_type == QEDF_ABTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /* ID: 004 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) rc = kref_get_unless_zero(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) QEDF_ERR(&(qedf->dbg_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) io_req, io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (lun != -1 && io_req->lun != lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) goto free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) "Flushing abort xid=0x%x.\n", io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (cancel_delayed_work_sync(&io_req->rrq_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) "Putting ref for cancelled RRQ work xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (cancel_delayed_work_sync(&io_req->timeout_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) "Putting ref for cancelled tmo work xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) qedf_initiate_cleanup(io_req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /* Notify eh_abort handler that ABTS is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) complete(&io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* ID: 002 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) flush_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) goto free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (!io_req->sc_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (!io_req->sc_cmd->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) "Device backpointer NULL for sc_cmd=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) io_req->sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /* Put reference for non-existent scsi_cmnd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) qedf_initiate_cleanup(io_req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (lun > -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (io_req->lun != lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * Use kref_get_unless_zero in the unlikely case the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * we're about to flush was completed in the normal SCSI path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) rc = kref_get_unless_zero(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) "Cleanup xid=0x%x.\n", io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) flush_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) /* Cleanup task and return I/O mid-layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) qedf_initiate_cleanup(io_req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) free_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
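	/* Allow up to wait_cnt * 500ms below for outstanding I/O to complete. */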
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) wait_cnt = 60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) "Flushed 0x%x I/Os, active=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) flush_cnt, atomic_read(&fcport->num_active_ios));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /* Only wait for all commands to complete in the Upload context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) (lun == -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) while (atomic_read(&fcport->num_active_ios)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) flush_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) atomic_read(&fcport->num_active_ios),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) wait_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (wait_cnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) "Flushed %d I/Os, active=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) flush_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) atomic_read(&fcport->num_active_ios));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) io_req = &cmd_mgr->cmds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (io_req->fcport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) io_req->fcport == fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) refcount =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) set_bit(QEDF_CMD_DIRTY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) io_req, io_req->xid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) io_req->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) io_req->sc_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) refcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) io_req->cmd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) wait_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) mutex_unlock(&qedf->flush_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * Initiate a ABTS middle path command. Note that we don't have to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * the task context for an ABTS task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) struct fc_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) struct qedf_rport *fcport = io_req->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) struct fc_rport_priv *rdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) u16 xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct fcoe_wqe *sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) u16 sqe_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) int refcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) /* Sanity check qedf_rport before dereferencing any pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) QEDF_ERR(NULL, "tgt not offloaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) qedf = fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) rdata = fcport->rdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) lport = qedf->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) goto drop_rdata_kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) goto drop_rdata_kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /* Ensure room on SQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (!atomic_read(&fcport->free_sqes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) goto drop_rdata_kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) goto drop_rdata_kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) io_req->xid, io_req->sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) goto drop_rdata_kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) kref_get(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) xid = io_req->xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) qedf->control_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) qedf->packet_aborts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /* Set the command type to abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) io_req->cmd_type = QEDF_ABTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) refcount = kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) "ABTS io_req xid = 0x%x refcount=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) xid, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
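	/* Post the ABTS WQE on the send queue under the rport lock. */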
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) spin_lock_irqsave(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) sqe_idx = qedf_get_sqe_idx(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) sqe = &fcport->sq[sqe_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) memset(sqe, 0, sizeof(struct fcoe_wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) io_req->task_params->sqe = sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) init_initiator_abort_fcoe_task(io_req->task_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) qedf_ring_doorbell(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) spin_unlock_irqrestore(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) drop_rdata_kref:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) kref_put(&rdata->kref, fc_rport_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) uint32_t r_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) struct qedf_rport *fcport = io_req->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) r_ctl = cqe->cqe_info.abts_info.r_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) /* This was added at a point when we were scheduling abts_compl &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * cleanup_compl on different CPUs and there was a possibility of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * the io_req to be freed from the other context before we got here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) "Dropping ABTS completion xid=0x%x as fcport is NULL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * When flush is active, let the cmds be completed from the cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) "Dropping ABTS completion xid=0x%x as fcport is flushing",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (!cancel_delayed_work(&io_req->timeout_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) "Wasn't able to cancel abts timeout work.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) switch (r_ctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) case FC_RCTL_BA_ACC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) "ABTS response - ACC Send RRQ after R_A_TOV\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * Dont release this cmd yet. It will be relesed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * after we get RRQ response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) msecs_to_jiffies(qedf->lport->r_a_tov));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /* For error cases let the cleanup return the command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) case FC_RCTL_BA_RJT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) "ABTS response - RJT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (io_req->sc_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (!io_req->return_scsi_cmd_on_abts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) "Not call scsi_done for xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (io_req->return_scsi_cmd_on_abts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) qedf_scsi_done(qedf, io_req, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) /* Notify eh_abort handler that ABTS is complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) complete(&io_req->abts_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) int qedf_init_mp_req(struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) struct qedf_mp_req *mp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) struct scsi_sge *mp_req_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) struct scsi_sge *mp_resp_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) struct qedf_ctx *qedf = io_req->fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) uint64_t sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) memset(mp_req, 0, sizeof(struct qedf_mp_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (io_req->cmd_type != QEDF_ELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) mp_req->req_len = sizeof(struct fcp_cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) io_req->data_xfer_len = mp_req->req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) mp_req->req_len = io_req->data_xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
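	/* Allocate DMA-coherent buffers for the MP request and response payloads. */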
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) &mp_req->req_buf_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (!mp_req->req_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) qedf_free_mp_resc(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (!mp_req->resp_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) "buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) qedf_free_mp_resc(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) /* Allocate and map mp_req_bd and mp_resp_bd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) sz = sizeof(struct scsi_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) &mp_req->mp_req_bd_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (!mp_req->mp_req_bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) qedf_free_mp_resc(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) &mp_req->mp_resp_bd_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (!mp_req->mp_resp_bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) qedf_free_mp_resc(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* Fill bd table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) addr = mp_req->req_buf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) mp_req_bd = mp_req->mp_req_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) mp_req_bd->sge_addr.lo = U64_LO(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) mp_req_bd->sge_addr.hi = U64_HI(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) mp_req_bd->sge_len = QEDF_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * MP buffer is either a task mgmt command or an ELS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * So the assumption is that it consumes a single bd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * entry in the bd table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) mp_resp_bd = mp_req->mp_resp_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) addr = mp_req->resp_buf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) mp_resp_bd->sge_addr.lo = U64_LO(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) mp_resp_bd->sge_addr.hi = U64_HI(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * Last ditch effort to clear the port if it's stuck. Used only after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * cleanup task times out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) static void qedf_drain_request(struct qedf_ctx *qedf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) /* Set bit to return all queuecommand requests as busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /* Call qed drain request for function. Should be synchronous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) qed_ops->common->drain(qedf->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /* Settle time for CQEs to be returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) /* Unplug and continue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * Returns SUCCESS if the cleanup task does not timeout, otherwise return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * FAILURE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) bool return_scsi_cmd_on_abts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) struct qedf_rport *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) int tmo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) int rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct fcoe_wqe *sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) u16 sqe_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) int refcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) fcport = io_req->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) QEDF_ERR(NULL, "fcport is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) /* Sanity check qedf_rport before dereferencing any pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) QEDF_ERR(NULL, "tgt not offloaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) qedf = fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (!qedf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) QEDF_ERR(NULL, "qedf is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (io_req->cmd_type == QEDF_ELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) goto process_els;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) "cleanup processing or already completed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) process_els:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) /* Ensure room on SQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (!atomic_read(&fcport->free_sqes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /* Need to make sure we clear the flag since it was set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (io_req->cmd_type == QEDF_CLEANUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) io_req->xid, io_req->cmd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) refcount = kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) refcount, fcport, fcport->rdata->ids.port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) /* Cleanup cmds re-use the same TID as the original I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) io_req->cmd_type = QEDF_CLEANUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) init_completion(&io_req->cleanup_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
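	/* Post the cleanup WQE; cleanup_done is completed from the CQE handler. */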
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) spin_lock_irqsave(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) sqe_idx = qedf_get_sqe_idx(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) sqe = &fcport->sq[sqe_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) memset(sqe, 0, sizeof(struct fcoe_wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) io_req->task_params->sqe = sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) init_initiator_cleanup_fcoe_task(io_req->task_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) qedf_ring_doorbell(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) spin_unlock_irqrestore(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) tmo = wait_for_completion_timeout(&io_req->cleanup_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) QEDF_CLEANUP_TIMEOUT * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (!tmo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /* Timeout case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) "xid=%x.\n", io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) /* Issue a drain request if cleanup task times out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) qedf_drain_request(qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* If it TASK MGMT handle it, reference will be decreased
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) * in qedf_execute_tmf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) io_req->tm_flags == FCP_TMF_TGT_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) complete(&io_req->tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (io_req->sc_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (!io_req->return_scsi_cmd_on_abts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) "Not call scsi_done for xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (io_req->return_scsi_cmd_on_abts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) qedf_scsi_done(qedf, io_req, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (rc == SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /* Complete so we can finish cleaning up the I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) complete(&io_req->cleanup_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) uint8_t tm_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) struct qedf_ioreq *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct e4_fcoe_task_context *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) struct qedf_ctx *qedf = fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) struct fc_lport *lport = qedf->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) uint16_t xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) int tmo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) int lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) struct fcoe_wqe *sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) u16 sqe_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (!sc_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) lun = (int)sc_cmd->device->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) goto no_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (!io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) goto no_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (tm_flags == FCP_TMF_LUN_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) qedf->lun_resets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) else if (tm_flags == FCP_TMF_TGT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) qedf->target_resets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) /* Initialize rest of io_req fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) io_req->sc_cmd = sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) io_req->fcport = fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) io_req->cmd_type = QEDF_TASK_MGMT_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) /* Record which cpu this request is associated with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) io_req->cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /* Set TM flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) io_req->io_req_flags = QEDF_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) io_req->data_xfer_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) io_req->tm_flags = tm_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) /* A TMF request has no original SCSI command to return on abort/cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) io_req->return_scsi_cmd_on_abts = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /* Obtain exchange id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) xid = io_req->xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = 0x%x\n", xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* Initialize task context for this IO request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) task = qedf_get_task_mem(&qedf->tasks, xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) init_completion(&io_req->tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
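/*
 * Fill in the SQE and ring the doorbell under rport_lock so the
 * send-queue producer index cannot race with other submissions on
 * this offloaded connection.
 */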
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) spin_lock_irqsave(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) sqe_idx = qedf_get_sqe_idx(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) sqe = &fcport->sq[sqe_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) memset(sqe, 0, sizeof(struct fcoe_wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) qedf_init_task(fcport, lport, io_req, task, sqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) qedf_ring_doorbell(fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) spin_unlock_irqrestore(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) tmo = wait_for_completion_timeout(&io_req->tm_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) QEDF_TM_TIMEOUT * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) if (!tmo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Clear outstanding bit since command timed out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) /* Check TMF response code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if (io_req->fcp_rsp_code == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) * Double check that fcport has not gone into an uploading state before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) * executing the command flush for the LUN/target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) "fcport is uploading, not executing flush.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) goto no_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) /* We do not need this io_req any more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) kref_put(&io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (tm_flags == FCP_TMF_LUN_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) qedf_flush_active_ios(fcport, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) qedf_flush_active_ios(fcport, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) no_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (rc != SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
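/*
 * qedf_initiate_tmf() - Entry point for LUN/target reset from the SCSI
 * error handler.  Takes a reference on the remote port data, validates
 * the link, lport and offloaded session state, and then hands the
 * request off to qedf_execute_tmf().
 */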
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) struct fc_rport_libfc_priv *rp = rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) struct fc_lport *lport = shost_priv(sc_cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) int rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) int rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) struct qedf_ioreq *io_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) int ref_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) struct fc_rport_priv *rdata = fcport->rdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) QEDF_ERR(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) rport->scsi_target_id, (int)sc_cmd->device->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) QEDF_ERR(NULL, "stale rport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) QEDF_ERR(NULL, "portid=%06x tm_flags=%s\n", rdata->ids.port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) "LUN RESET");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (sc_cmd->SCp.ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) ref_cnt = kref_read(&io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) QEDF_ERR(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) io_req, io_req->xid, ref_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) rval = fc_remote_port_chkready(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) QEDF_ERR(NULL, "device_reset rport not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) rc = fc_block_scsi_eh(sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) QEDF_ERR(NULL, "device_reset: rport is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) qedf = fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (!qedf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) QEDF_ERR(NULL, "qedf is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (!fcport->rdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) "fcport %p port_id=%06x is uploading.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) fcport, fcport->rdata->ids.port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) goto tmf_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) tmf_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) kref_put(&rdata->kref, fc_rport_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
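/*
 * qedf_process_tmf_compl() - Firmware completion handler for a task
 * management request: parse the FCP response into the io_req and wake
 * the waiter in qedf_execute_tmf().
 */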
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) struct qedf_ioreq *io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct fcoe_cqe_rsp_info *fcp_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) fcp_rsp = &cqe->cqe_info.rsp_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) qedf_parse_fcp_rsp(io_req, fcp_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) complete(&io_req->tm_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
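/*
 * qedf_process_unsol_compl() - Handle an unsolicited frame received via
 * the BDQ.  The frame is copied out of the BDQ buffer into a freshly
 * allocated fc_frame and deferred to qedf_fp_io_handler() so libfc can
 * process it in non-atomic context; the BDQ producer index is then
 * advanced so firmware can reuse the buffer.
 */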
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) struct fcoe_cqe *cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) u32 payload_len, crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) struct fc_frame_header *fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) struct fc_frame *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) struct qedf_io_work *io_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) u32 bdq_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) void *bdq_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) struct scsi_bd *p_bd_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) le32_to_cpu(p_bd_info->address.hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) le32_to_cpu(p_bd_info->address.lo),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) qedf->bdq_prod_idx, pktlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (bdq_idx >= QEDF_BDQ_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) bdq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) goto increment_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) bdq_addr = qedf->bdq[bdq_idx].buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) if (!bdq_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping unsolicited packet.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) goto increment_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (qedf_dump_frames) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) "BDQ frame is at addr=%p.\n", bdq_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) (void *)bdq_addr, pktlen, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) /* Allocate frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) payload_len = pktlen - sizeof(struct fc_frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) fp = fc_frame_alloc(qedf->lport, payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (!fp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) goto increment_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /* Copy data from BDQ buffer into fc_frame struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) fh = (struct fc_frame_header *)fc_frame_header_get(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) memcpy(fh, (void *)bdq_addr, pktlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) QEDF_WARN(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) fh->fh_type, fc_frame_payload_op(fp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* Initialize the frame so libfc sees it as a valid frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) crc = fcoe_fc_crc(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) fc_frame_init(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) fr_dev(fp) = qedf->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) fr_sof(fp) = FC_SOF_I3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) fr_eof(fp) = FC_EOF_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) fr_crc(fp) = cpu_to_le32(~crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) * We need to return the frame back up to libfc in a non-atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) * context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (!io_work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate work for I/O completion.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) fc_frame_free(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) goto increment_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) memset(io_work, 0, sizeof(struct qedf_io_work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) INIT_WORK(&io_work->work, qedf_fp_io_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) /* Copy contents of CQE for deferred processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) io_work->qedf = qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) io_work->fp = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) increment_prod:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) spin_lock_irqsave(&qedf->hba_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) /* Increment producer to let f/w know we've handled the frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) qedf->bdq_prod_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) /* Producer index wraps at uint16_t boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (qedf->bdq_prod_idx == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) qedf->bdq_prod_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
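/*
 * Update both producer registers; the readw() after each writew()
 * flushes the posted MMIO write before the next access.
 */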
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) readw(qedf->bdq_primary_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) readw(qedf->bdq_secondary_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) spin_unlock_irqrestore(&qedf->hba_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }