// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"

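/*
 * qedf_initiate_els() - Build and post a middle-path ELS request.
 *
 * Allocates a QEDF_ELS command for the fcport, copies the ELS payload into
 * the middle-path request buffer, fills the FC header, initializes the task
 * context, and rings the doorbell to hand the request to the firmware.
 * cb_func is invoked with cb_arg when the ELS completes; if timer_msec is
 * non-zero, a timeout is also armed on the request.
 *
 * It's assumed that the lock is held when calling this function.
 */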
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			 op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "initiate_els els_req = 0x%p cb_arg = %p xid = %x\n",
		  els_req, cb_arg, els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on els request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Ringing doorbell for ELS req\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}

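/*
 * qedf_process_els_compl() - Handle the firmware CQE for an ELS request.
 *
 * Completions that arrive after a flush or cleanup event, or while the
 * fcport is being flushed for a LUN/target reset, are dropped; the cleanup
 * context owns those commands. Otherwise the ELS timer is cancelled, the
 * response length is taken from the midpath CQE info, and the request's
 * callback (if any) is invoked before the command reference is dropped.
 */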
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;
	struct qedf_rport *fcport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Entered with xid = 0x%x cmd_type = %d.\n",
		  els_req->xid, els_req->cmd_type);

	if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
	    || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
	    || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "ELS completion xid=0x%x after flush event=0x%x\n",
			  els_req->xid, els_req->event);
		return;
	}

	fcport = els_req->fcport;

	/*
	 * When a flush is active, let the commands be completed from the
	 * cleanup context.
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping ELS completion xid=0x%x as fcport is flushing\n",
			  els_req->xid);
		return;
	}

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

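/*
 * qedf_rrq_compl() - Completion handler for an RRQ ELS request.
 *
 * Drops the reference that was keeping the aborted I/O request alive for
 * the duration of the RRQ, which should return that command to the pool.
 * If the RRQ itself timed out, the RRQ command's own reference is also
 * released here since the normal ELS completion path never ran.
 */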
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
		goto out_free;
	}

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "rrq_compl: orig io = %p, orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we got a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

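/*
 * qedf_send_rrq() - Send a Reinstate Recovery Qualifier ELS for an aborted
 * I/O request.
 *
 * Assumes the kref on the aborted io_req is already held by the caller;
 * that reference is dropped by qedf_rrq_compl() (or here on failure). The
 * RRQ frame carries the OX_ID/RX_ID of the aborted exchange and is sent
 * with a timeout of the lport's R_A_TOV.
 */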
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;
	int refcount;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	if (!fcport) {
		refcount = kref_read(&aborted_io_req->refcount);
		QEDF_ERR(NULL,
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;

	/*
	 * Sanity check that we can send the RRQ: the refcount of the
	 * original request must not have dropped to 0 already.
	 */
	refcount = kref_read(&aborted_io_req->refcount);
	if (refcount != 1) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);
		return -EINVAL;
	}

	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending RRQ orig io = %p, orig_xid = 0x%x\n",
		  aborted_io_req, aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "RRQ failed - release orig io req 0x%x\n",
			 aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

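/*
 * qedf_process_l2_frame_compl() - Hand an ELS response frame back to libfc.
 *
 * Rewrites the frame header so the response looks like a normal L2 ELS
 * reply (OX_ID restored to the one libfc used, D_ID/S_ID set to the local
 * and remote port IDs), recomputes the FC CRC, and passes the frame to
 * fc_exch_recv() for normal exchange-level processing.
 */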
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Exchange responder, last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

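/*
 * qedf_restart_rport() - Recover an rport whose ELS command timed out.
 *
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in: the remote port is logged
 * off via libfc, recreated with fc_rport_create(), and a new login is
 * initiated. The QEDF_RPORT_IN_RESET flag guards against concurrent resets.
 */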
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return;
	}

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
			 fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
		fcport->rdata = NULL;
		rdata = NULL;
	}

	if (rdata && rdata->rp_state == RPORT_ST_READY) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		mutex_unlock(&lport->disc.disc_mutex);
		if (rdata)
			fc_rport_login(rdata);
		fcport->rdata = rdata;
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}

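/*
 * qedf_l2_els_compl() - Completion handler for ELS requests issued on
 * behalf of libfc (ADISC).
 *
 * Copies the firmware's response header and payload into a freshly
 * allocated fc_frame and returns it to libfc via
 * qedf_process_l2_frame_compl() under the original L2 OX_ID. Flushed
 * requests are dropped, and a timed-out ADISC triggers an rport restart so
 * the subsequent PLOGI can flow over the LL2 path.
 */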
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
		QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
			 els_req->xid);
		goto free_arg;
	}

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then let libfc
	 * time out the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "resp_len is beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

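/*
 * qedf_send_adisc() - Issue an ADISC on behalf of libfc over the offloaded
 * session.
 *
 * Saves libfc's OX_ID in the callback argument so the response can be
 * returned on the exchange libfc expects, then sends the ADISC payload from
 * the frame as a middle-path ELS with an R_A_TOV timeout.
 */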
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Unable to allocate cb_arg for ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

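/*
 * qedf_srr_compl() - Completion handler for a Sequence Retransmission
 * Request.
 *
 * Clears QEDF_CMD_SRR_SENT on the original I/O, normalizes the response
 * into a struct fc_frame, and inspects the ELS opcode: an LS_ACC means the
 * target will retransmit, while an LS_RJT causes the original I/O to be
 * aborted. A timed-out SRR simply releases its resources.
 */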
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		goto out_free;
	}

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Entered: orig_io=%p, orig_io_xid=0x%x, srr_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "ELS timeout srr_xid=0x%x.\n", srr_req->xid);
		goto out_put;
	}

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

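/*
 * qedf_send_srr() - Send a Sequence Retransmission Request for an I/O whose
 * sequence was lost.
 *
 * Takes an extra reference on orig_io_req for the lifetime of the SRR and
 * sets QEDF_CMD_SRR_SENT so other threads know recovery is in progress. On
 * failure the reference is dropped and the original I/O is aborted instead.
 */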
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending SRR orig_io=%p, orig_xid=0x%x\n",
		  orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "SRR failed - release orig_io_req=0x%x\n",
			 orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else {
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
	}

	return rc;
}

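/*
 * qedf_initiate_seq_cleanup() - Ask the firmware to clean up a sequence
 * before it is retransmitted.
 *
 * Repurposes orig_io_req as a QEDF_SEQ_CLEANUP command, stashes the
 * relative offset and R_CTL for the follow-up SRR in the callback argument,
 * arms a cleanup timeout, and posts a sequence-recovery work queue entry to
 * the firmware.
 */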
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Doing sequence cleanup for xid=0x%x offset=%u.\n",
		  orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "Unable to allocate cb_arg for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
			   QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

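/*
 * qedf_process_seq_cleanup_compl() - Handle the CQE for a sequence cleanup.
 *
 * On a successful cleanup completion the pending timeout is cancelled and
 * the SRR is sent with the offset/R_CTL saved in the callback argument. On
 * timeout (or a missing CQE) the resources are simply freed; the I/O will
 * be recovered by an abort instead.
 */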
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "cqe is NULL or timeout event (0x%x)\n", io_req->event);
		goto free;
	}

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Unable to send SRR, I/O will abort, xid=0x%x.\n",
			 io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

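/*
 * qedf_requeue_io_req() - Reissue a SCSI command on a new exchange.
 *
 * Allocates a new QEDF_SCSI_CMD, moves the scsi_cmnd over from the original
 * io_req (so the command is never completed twice), posts the new request,
 * and aborts the original I/O without returning it to the SCSI layer since
 * the command now lives on another OX_ID.
 */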
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct qedf_rport *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct qedf_ioreq *new_io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) bool rc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) fcport = orig_io_req->fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (!fcport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) QEDF_ERR(NULL, "fcport is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (!orig_io_req->sc_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) "xid=0x%x.\n", orig_io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (!new_io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) "io_req.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) new_io_req->sc_cmd = orig_io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * This keeps the sc_cmd struct from being returned to the tape
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * driver and being requeued twice. We do need to put a reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * for the original I/O request since we will not do a SCSI completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) orig_io_req->sc_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) kref_put(&orig_io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
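	/*
	 * fcport->rport_lock is assumed here to serialize SQ producer
	 * updates with the fast-path submission code: the new request
	 * must be queued and the doorbell rung under the same lock.
	 */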
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) spin_lock_irqsave(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /* kref for new command released in qedf_post_io_req on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (qedf_post_io_req(fcport, new_io_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /* Return SQE to pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Reissued SCSI command from orig_xid=0x%x on new_xid=0x%x.\n",
			  orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return the SCSI command
		 * as it has been reissued on another OX_ID.
		 */
		rc = true;
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_unlock_irqrestore(&fcport->rport_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
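/*
 * qedf_rec_compl() - completion handler for a REC (Read Exchange Concise)
 * ELS request issued by qedf_send_rec().
 *
 * Uses the REC response to determine how far the original exchange
 * progressed and picks a recovery action: requeue the command when the
 * target rejects with an unknown exchange, send an SRR when only the
 * response frame was lost, or clean up the sequence when data frames
 * were lost.
 */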
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct qedf_ioreq *orig_io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct qedf_ioreq *rec_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct qedf_mp_req *mp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct fc_frame_header *mp_fc_hdr, *fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct fc_frame *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) void *resp_buf, *fc_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u32 resp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct fc_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) int refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) enum fc_rctl r_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct fc_els_ls_rjt *rjt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct fc_els_rec_acc *acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) u8 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) u32 offset, e_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct scsi_cmnd *sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) bool srr_needed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rec_req = cb_arg->io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) qedf = rec_req->fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) lport = qedf->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) orig_io_req = cb_arg->aborted_io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!orig_io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) QEDF_ERR(NULL, "orig_io_req is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
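	/*
	 * Cancel the original command's timer only when the REC itself
	 * completed; on an ELS timeout or error-detect event the timer is
	 * left running so the original I/O's own timeout handling can
	 * still reclaim it.
	 */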
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) cancel_delayed_work_sync(&orig_io_req->timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Entered: orig_io=%p, orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* If a REC times out, free resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) QEDF_ERR(&qedf->dbg_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) orig_io_req, orig_io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* Normalize response data into struct fc_frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) mp_req = &(rec_req->mp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) mp_fc_hdr = &(mp_req->resp_fc_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) resp_len = mp_req->resp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) acc = resp_buf = mp_req->resp_buf;
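	/*
	 * The firmware returns the ELS response as a raw frame header and
	 * payload in the middle-path buffers; rebuilding a struct fc_frame
	 * lets the libfc accessors (fc_frame_payload_op(),
	 * fc_frame_payload_get()) parse it as a normal received frame.
	 */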
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) fp = fc_frame_alloc(lport, resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* Copy frame header from firmware into fp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) fh = (struct fc_frame_header *)fc_frame_header_get(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* Copy payload from firmware into fp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) fc_payload = fc_frame_payload_get(fp, resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) memcpy(fc_payload, resp_buf, resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) opcode = fc_frame_payload_op(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (opcode == ELS_LS_RJT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) rjt = fc_frame_payload_get(fp, sizeof(*rjt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!rjt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) goto out_free_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Received LS_RJT for REC: er_reason=0x%x, er_explan=0x%x.\n",
			  rjt->er_reason, rjt->er_explan);
		/*
		 * These reject codes mean the target has no record of the
		 * exchange, so the request must be reissued on a new
		 * exchange. Do this without informing the upper layers,
		 * which would otherwise surface an application error.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if ((rjt->er_reason == ELS_RJT_LOGIC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) rjt->er_reason == ELS_RJT_UNAB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) rjt->er_explan == ELS_EXPL_OXID_RXID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) "Handle CMD LOST case.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) qedf_requeue_io_req(orig_io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) } else if (opcode == ELS_LS_ACC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) offset = ntohl(acc->reca_fc4value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) e_stat = ntohl(acc->reca_e_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) offset, e_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (e_stat & ESB_ST_SEQ_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) "Target has the seq init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) goto out_free_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) sc_cmd = orig_io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (!sc_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) "sc_cmd is NULL for xid=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) orig_io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) goto out_free_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* SCSI write case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (offset == orig_io_req->data_xfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) "WRITE - response lost.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) r_ctl = FC_RCTL_DD_CMD_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) srr_needed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) "WRITE - XFER_RDY/DATA lost.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) r_ctl = FC_RCTL_DD_DATA_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* Use data from warning CQE instead of REC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) offset = orig_io_req->tx_buf_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* SCSI read case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (orig_io_req->rx_buf_off ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) orig_io_req->data_xfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) "READ - response lost.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) srr_needed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) r_ctl = FC_RCTL_DD_CMD_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) "READ - DATA lost.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * For read case we always set the offset to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * for sequence recovery task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) r_ctl = FC_RCTL_DD_SOL_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
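		/*
		 * Recovery action chosen above:
		 *  - all data seen but response lost: SRR asking the target
		 *    to retransmit the FCP_RSP (offset 0)
		 *  - WRITE with XFER_RDY/DATA lost: sequence cleanup from
		 *    the last offset the target acknowledged
		 *  - READ with DATA lost: sequence cleanup from offset 0
		 */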
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (srr_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) qedf_send_srr(orig_io_req, offset, r_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) out_free_frame:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) fc_frame_free(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* Put reference for original command since REC completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) kref_put(&orig_io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) kfree(cb_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
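/*
 * Hypothetical caller sketch (illustration only, not a path in this file):
 * a completion handler that suspects a lost frame would send a REC while
 * already holding a reference on io_req, and fall back to aborting the
 * exchange if the REC cannot be issued:
 *
 *	if (qedf_send_rec(io_req))
 *		qedf_initiate_abts(io_req, true);
 */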
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* Assumes kref is already held by caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) int qedf_send_rec(struct qedf_ioreq *orig_io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct fc_els_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct qedf_rport *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct fc_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct qedf_els_cb_arg *cb_arg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) uint32_t sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) uint32_t r_a_tov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (!orig_io_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) QEDF_ERR(NULL, "orig_io_req is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (!fcport->qedf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* Take reference until REC command completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) kref_get(&orig_io_req->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) qedf = fcport->qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) lport = qedf->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) sid = fcport->sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) r_a_tov = lport->r_a_tov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) memset(&rec, 0, sizeof(rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Unable to allocate cb_arg for REC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) goto rec_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) cb_arg->aborted_io_req = orig_io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) rec.rec_cmd = ELS_REC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) hton24(rec.rec_s_id, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) rec.rec_ox_id = htons(orig_io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) rec.rec_rx_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
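	/*
	 * Per the FC-LS definition of REC, the payload names the exchange
	 * to query: our S_ID plus the OX_ID/RX_ID pair. The RX_ID is taken
	 * from the firmware task context since the target assigned it
	 * after the original command was sent.
	 */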
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending REC orig_io=%p, orig_xid=0x%x rx_id=0x%x\n",
		  orig_io_req, orig_io_req->xid, rec.rec_rx_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) qedf_rec_compl, cb_arg, r_a_tov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) rec_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "REC failed - release orig_io_req=0x%x\n",
			 orig_io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) kfree(cb_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) kref_put(&orig_io_req->refcount, qedf_release_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }